2024-12-06 03:45:07,843 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-06 03:45:07,858 main DEBUG Took 0.012884 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-06 03:45:07,858 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-06 03:45:07,858 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-06 03:45:07,859 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-06 03:45:07,861 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,870 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-06 03:45:07,880 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,881 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,882 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,882 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,883 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,884 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,885 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,885 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,886 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,886 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,887 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,887 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,888 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,888 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-06 03:45:07,888 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,888 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,889 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,889 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,889 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,889 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,890 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,890 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,890 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,891 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 03:45:07,891 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,891 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-06 03:45:07,893 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 03:45:07,895 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-06 03:45:07,897 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-06 03:45:07,897 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-06 03:45:07,899 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-06 03:45:07,899 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-06 03:45:07,909 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-06 03:45:07,912 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-06 03:45:07,914 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-06 03:45:07,915 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-06 03:45:07,915 main DEBUG createAppenders(={Console}) 2024-12-06 03:45:07,916 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-06 03:45:07,917 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-06 03:45:07,917 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-06 03:45:07,918 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-06 03:45:07,918 main DEBUG OutputStream closed 2024-12-06 03:45:07,918 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-06 03:45:07,919 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-06 03:45:07,919 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-06 03:45:08,001 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-06 03:45:08,003 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-06 03:45:08,004 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-06 03:45:08,005 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-06 03:45:08,006 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-06 03:45:08,006 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-06 03:45:08,006 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-06 03:45:08,007 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-06 03:45:08,007 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-06 03:45:08,007 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-06 03:45:08,007 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-06 03:45:08,008 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-06 03:45:08,008 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-06 03:45:08,008 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-06 03:45:08,008 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-06 03:45:08,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-06 03:45:08,009 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-06 03:45:08,010 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-06 03:45:08,012 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-06 03:45:08,012 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-06 03:45:08,012 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-06 03:45:08,013 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-06T03:45:08,330 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50 2024-12-06 03:45:08,334 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-06 03:45:08,334 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
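[Editor's note] The records above show Log4j2 loading the HBase test logging setup from log4j2.properties inside the hbase-logging tests jar: per-package logger levels, a PatternLayout with pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, and an appender named "Console" (the HBase-specific HBaseTestAppender) writing to SYSTEM_ERR under an INFO root logger. Purely as an illustration, the sketch below builds an approximately equivalent configuration with Log4j2's ConfigurationBuilder API; it substitutes a plain Console appender for HBaseTestAppender and trims the logger list to a few examples taken from the log, so it is not the actual HBase test configuration.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public class TestLoggingConfigSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> b = ConfigurationBuilderFactory.newConfigurationBuilder();
    // Console appender on stderr with the pattern printed in the log; the real test config
    // uses the HBase-specific HBaseTestAppender plugin rather than a plain Console appender.
    b.add(b.newAppender("Console", "Console")
        .addAttribute("target", "SYSTEM_ERR")
        .add(b.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));
    // A few of the per-package levels shown above; the full properties file defines many more.
    b.add(b.newLogger("org.apache.hadoop", Level.WARN));
    b.add(b.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    b.add(b.newLogger("org.apache.zookeeper", Level.ERROR));
    // Root logger at INFO routed to the Console appender ("levelAndRefs=INFO,Console" in the log).
    b.add(b.newRootLogger(Level.INFO).add(b.newAppenderRef("Console")));
    Configurator.initialize(b.build());
  }
}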
2024-12-06T03:45:08,346 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-06T03:45:08,377 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=399, ProcessCount=11, AvailableMemoryMB=8833 2024-12-06T03:45:08,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T03:45:08,392 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db, deleteOnExit=true 2024-12-06T03:45:08,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-06T03:45:08,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/test.cache.data in system properties and HBase conf 2024-12-06T03:45:08,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T03:45:08,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.log.dir in system properties and HBase conf 2024-12-06T03:45:08,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T03:45:08,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T03:45:08,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T03:45:08,460 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-06T03:45:08,538 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T03:45:08,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:45:08,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:45:08,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T03:45:08,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:45:08,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T03:45:08,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T03:45:08,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:45:08,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:45:08,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T03:45:08,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/nfs.dump.dir in system properties and HBase conf 2024-12-06T03:45:08,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/java.io.tmpdir in system properties and HBase conf 2024-12-06T03:45:08,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:45:08,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T03:45:08,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T03:45:08,988 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:45:09,514 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-06T03:45:09,579 INFO [Time-limited test {}] log.Log(170): Logging initialized @2443ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-06T03:45:09,650 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:45:09,708 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:45:09,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:45:09,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:45:09,728 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:45:09,740 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:45:09,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:45:09,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:45:09,911 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/java.io.tmpdir/jetty-localhost-45027-hadoop-hdfs-3_4_1-tests_jar-_-any-4639529156131457973/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:45:09,917 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:45027} 2024-12-06T03:45:09,917 INFO [Time-limited test {}] server.Server(415): Started @2782ms 2024-12-06T03:45:09,947 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:45:10,412 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:45:10,418 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:45:10,419 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:45:10,419 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:45:10,420 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:45:10,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:45:10,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:45:10,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/java.io.tmpdir/jetty-localhost-46049-hadoop-hdfs-3_4_1-tests_jar-_-any-8440077432784558327/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:45:10,517 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:46049} 2024-12-06T03:45:10,517 INFO [Time-limited test {}] server.Server(415): Started @3382ms 2024-12-06T03:45:10,565 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:45:10,661 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:45:10,667 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:45:10,669 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:45:10,669 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:45:10,669 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:45:10,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:45:10,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:45:10,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/java.io.tmpdir/jetty-localhost-43511-hadoop-hdfs-3_4_1-tests_jar-_-any-3718569658471100734/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:45:10,786 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:43511} 2024-12-06T03:45:10,786 INFO [Time-limited test {}] server.Server(415): Started @3651ms 2024-12-06T03:45:10,788 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
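[Editor's note] The startup sequence above (HBaseTestingUtil(805): "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}" followed by DFS and Jetty coming up) is driven by the HBase testing utility. A minimal sketch of how a test typically requests exactly that topology is shown below; it is not the TestLogRolling source, and the builder methods are assumed to mirror the option fields printed in the log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster, as in StartMiniClusterOption{numMasters=1, ...}
        .numRegionServers(1)  // one region server
        .numDataNodes(2)      // two HDFS datanodes back the WALs and store files
        .numZkServers(1)      // single-node MiniZooKeeperCluster
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, the master and the region server
    try {
      // ... test body would run against the mini cluster here ...
    } finally {
      util.shutdownMiniCluster();    // tears everything down and removes the test data directory
    }
  }
}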
2024-12-06T03:45:11,747 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/data/data1/current/BP-508770624-172.17.0.2-1733456709070/current, will proceed with Du for space computation calculation, 2024-12-06T03:45:11,747 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/data/data4/current/BP-508770624-172.17.0.2-1733456709070/current, will proceed with Du for space computation calculation, 2024-12-06T03:45:11,747 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/data/data3/current/BP-508770624-172.17.0.2-1733456709070/current, will proceed with Du for space computation calculation, 2024-12-06T03:45:11,747 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/data/data2/current/BP-508770624-172.17.0.2-1733456709070/current, will proceed with Du for space computation calculation, 2024-12-06T03:45:11,773 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:45:11,773 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:45:11,817 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x189ffc6846afd409 with lease ID 0x849af0e8f5376e1: Processing first storage report for DS-072825f3-d731-4995-a8ee-3bac2d033e4a from datanode DatanodeRegistration(127.0.0.1:33917, datanodeUuid=dc95875a-842a-469d-8ec7-28a7d8ccfcb8, infoPort=39853, infoSecurePort=0, ipcPort=33751, storageInfo=lv=-57;cid=testClusterID;nsid=699060208;c=1733456709070) 2024-12-06T03:45:11,817 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x189ffc6846afd409 with lease ID 0x849af0e8f5376e1: from storage DS-072825f3-d731-4995-a8ee-3bac2d033e4a node DatanodeRegistration(127.0.0.1:33917, datanodeUuid=dc95875a-842a-469d-8ec7-28a7d8ccfcb8, infoPort=39853, infoSecurePort=0, ipcPort=33751, storageInfo=lv=-57;cid=testClusterID;nsid=699060208;c=1733456709070), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T03:45:11,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x36f02b94ed528ea7 with lease ID 0x849af0e8f5376e0: Processing first storage report for DS-de658b84-3f61-4eab-a78c-88ec3e8978af from datanode DatanodeRegistration(127.0.0.1:38091, datanodeUuid=13bb1c0d-c025-4a1d-b490-9d93180ea5c0, infoPort=40539, infoSecurePort=0, ipcPort=34747, storageInfo=lv=-57;cid=testClusterID;nsid=699060208;c=1733456709070) 2024-12-06T03:45:11,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x36f02b94ed528ea7 with lease ID 0x849af0e8f5376e0: from storage DS-de658b84-3f61-4eab-a78c-88ec3e8978af node DatanodeRegistration(127.0.0.1:38091, datanodeUuid=13bb1c0d-c025-4a1d-b490-9d93180ea5c0, infoPort=40539, infoSecurePort=0, ipcPort=34747, storageInfo=lv=-57;cid=testClusterID;nsid=699060208;c=1733456709070), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:45:11,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x189ffc6846afd409 with lease ID 0x849af0e8f5376e1: Processing first storage report for DS-b54988de-2f19-416a-b328-a886e97cf081 from datanode DatanodeRegistration(127.0.0.1:33917, datanodeUuid=dc95875a-842a-469d-8ec7-28a7d8ccfcb8, infoPort=39853, infoSecurePort=0, ipcPort=33751, storageInfo=lv=-57;cid=testClusterID;nsid=699060208;c=1733456709070) 2024-12-06T03:45:11,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x189ffc6846afd409 with lease ID 0x849af0e8f5376e1: from storage DS-b54988de-2f19-416a-b328-a886e97cf081 node DatanodeRegistration(127.0.0.1:33917, datanodeUuid=dc95875a-842a-469d-8ec7-28a7d8ccfcb8, infoPort=39853, infoSecurePort=0, ipcPort=33751, storageInfo=lv=-57;cid=testClusterID;nsid=699060208;c=1733456709070), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:45:11,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x36f02b94ed528ea7 with lease ID 0x849af0e8f5376e0: Processing first storage report for DS-0d084eca-9d47-4ae6-8fb4-4462ded54db4 from datanode DatanodeRegistration(127.0.0.1:38091, datanodeUuid=13bb1c0d-c025-4a1d-b490-9d93180ea5c0, infoPort=40539, infoSecurePort=0, ipcPort=34747, storageInfo=lv=-57;cid=testClusterID;nsid=699060208;c=1733456709070) 2024-12-06T03:45:11,819 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x36f02b94ed528ea7 
with lease ID 0x849af0e8f5376e0: from storage DS-0d084eca-9d47-4ae6-8fb4-4462ded54db4 node DatanodeRegistration(127.0.0.1:38091, datanodeUuid=13bb1c0d-c025-4a1d-b490-9d93180ea5c0, infoPort=40539, infoSecurePort=0, ipcPort=34747, storageInfo=lv=-57;cid=testClusterID;nsid=699060208;c=1733456709070), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:45:11,887 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50 2024-12-06T03:45:11,944 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/zookeeper_0, clientPort=56906, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T03:45:11,952 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56906 2024-12-06T03:45:11,961 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:45:11,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:45:12,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:45:12,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:45:12,549 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a with version=8 2024-12-06T03:45:12,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase-staging 2024-12-06T03:45:12,620 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-06T03:45:12,806 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:45:12,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:45:12,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, 
maxQueueLength=30, handlerCount=3 2024-12-06T03:45:12,819 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:45:12,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:45:12,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:45:12,931 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T03:45:12,978 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-06T03:45:12,985 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-06T03:45:12,988 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:45:13,008 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 23121 (auto-detected) 2024-12-06T03:45:13,009 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-06T03:45:13,025 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40143 2024-12-06T03:45:13,043 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40143 connecting to ZooKeeper ensemble=127.0.0.1:56906 2024-12-06T03:45:13,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:401430x0, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:45:13,183 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40143-0x101aa07e3bf0000 connected 2024-12-06T03:45:13,254 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:45:13,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:45:13,272 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:45:13,276 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a, hbase.cluster.distributed=false 2024-12-06T03:45:13,298 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:45:13,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40143 
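[Editor's note] The RpcExecutor records above (handlerCount=3, maxQueueLength=30 for the default, priority, replication and metaPriority pools) reflect HBase IPC configuration. As a hedged illustration only, the snippet below sets what I believe are the standard property names governing those pools to the small values this test run logged; treat the exact key-to-pool mapping as an assumption, not a statement of the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcPoolConfigSketch {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);             // default.FPBQ.Fifo handlerCount=3
    conf.setInt("hbase.regionserver.metahandler.count", 3);         // priority.RWQ.Fifo handlers
    conf.setInt("hbase.regionserver.replication.handler.count", 3); // replication.FPBQ.Fifo handlers
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);       // maxQueueLength=30 in the log
    return conf;
  }
}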
2024-12-06T03:45:13,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40143 2024-12-06T03:45:13,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40143 2024-12-06T03:45:13,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40143 2024-12-06T03:45:13,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40143 2024-12-06T03:45:13,404 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:45:13,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:45:13,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:45:13,406 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:45:13,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:45:13,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:45:13,410 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T03:45:13,412 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:45:13,413 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33817 2024-12-06T03:45:13,414 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33817 connecting to ZooKeeper ensemble=127.0.0.1:56906 2024-12-06T03:45:13,416 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:45:13,420 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:45:13,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338170x0, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:45:13,445 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:338170x0, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:45:13,445 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:33817-0x101aa07e3bf0001 connected 2024-12-06T03:45:13,448 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T03:45:13,455 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T03:45:13,457 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T03:45:13,461 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:45:13,462 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33817 2024-12-06T03:45:13,462 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33817 2024-12-06T03:45:13,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33817 2024-12-06T03:45:13,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33817 2024-12-06T03:45:13,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33817 2024-12-06T03:45:13,479 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6f1b912b0816:40143 2024-12-06T03:45:13,479 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6f1b912b0816,40143,1733456712663 2024-12-06T03:45:13,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:45:13,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:45:13,497 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6f1b912b0816,40143,1733456712663 2024-12-06T03:45:13,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T03:45:13,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:13,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:13,528 DEBUG 
[master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T03:45:13,529 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6f1b912b0816,40143,1733456712663 from backup master directory 2024-12-06T03:45:13,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6f1b912b0816,40143,1733456712663 2024-12-06T03:45:13,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:45:13,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:45:13,536 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:45:13,536 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6f1b912b0816,40143,1733456712663 2024-12-06T03:45:13,538 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-06T03:45:13,539 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-06T03:45:13,595 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase.id] with ID: 7f394fe1-b67c-44ba-830d-fae4fa10799f 2024-12-06T03:45:13,595 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/.tmp/hbase.id 2024-12-06T03:45:13,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:45:13,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:45:13,613 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/.tmp/hbase.id]:[hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase.id] 2024-12-06T03:45:13,653 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:45:13,657 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-12-06T03:45:13,674 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-06T03:45:13,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:13,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:13,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:45:13,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:45:13,722 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:45:13,724 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T03:45:13,730 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:45:13,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:45:13,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:45:13,778 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store 2024-12-06T03:45:13,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:45:13,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:45:13,801 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-06T03:45:13,804 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:45:13,806 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:45:13,806 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:45:13,807 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:45:13,809 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:45:13,809 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:45:13,809 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
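[Editor's note] The 'master:store' descriptor logged above (families info, proc, rs and state) is built internally during the master's local-region bootstrap. Purely to illustrate the same shape with the public client API, here is a hypothetical sketch using TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; the family names and the non-default 'info' settings are copied from the log, everything else falls back to defaults.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info' family: 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs' and 'state' use the defaults shown in the log (1 version, 64 KB blocks)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}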
2024-12-06T03:45:13,812 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456713806Disabling compacts and flushes for region at 1733456713806Disabling writes for close at 1733456713809 (+3 ms)Writing region close event to WAL at 1733456713809Closed at 1733456713809 2024-12-06T03:45:13,814 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/.initializing 2024-12-06T03:45:13,814 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/WALs/6f1b912b0816,40143,1733456712663 2024-12-06T03:45:13,837 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C40143%2C1733456712663, suffix=, logDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/WALs/6f1b912b0816,40143,1733456712663, archiveDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/oldWALs, maxLogs=10 2024-12-06T03:45:13,848 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C40143%2C1733456712663.1733456713843 2024-12-06T03:45:13,868 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/WALs/6f1b912b0816,40143,1733456712663/6f1b912b0816%2C40143%2C1733456712663.1733456713843 2024-12-06T03:45:13,875 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39853:39853),(127.0.0.1/127.0.0.1:40539:40539)] 2024-12-06T03:45:13,877 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:45:13,877 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:45:13,880 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,881 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,935 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T03:45:13,937 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:13,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:45:13,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,943 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T03:45:13,943 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:13,944 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:45:13,944 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T03:45:13,947 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:13,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:45:13,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,950 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T03:45:13,951 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:13,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:45:13,952 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,956 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,957 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,963 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,963 DEBUG [master/6f1b912b0816:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,967 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T03:45:13,971 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:45:13,975 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:45:13,977 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870571, jitterRate=0.1069883406162262}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T03:45:13,983 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733456713893Initializing all the Stores at 1733456713895 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456713896 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456713896Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456713897 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456713897Cleaning up temporary data from old regions at 1733456713963 (+66 ms)Region opened successfully at 1733456713983 (+20 ms) 2024-12-06T03:45:13,984 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T03:45:14,015 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ae66e14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:45:14,041 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T03:45:14,050 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T03:45:14,050 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T03:45:14,053 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T03:45:14,054 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-06T03:45:14,058 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-06T03:45:14,059 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T03:45:14,085 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T03:45:14,093 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T03:45:14,126 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T03:45:14,131 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T03:45:14,133 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T03:45:14,143 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T03:45:14,146 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T03:45:14,150 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T03:45:14,160 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T03:45:14,162 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T03:45:14,168 
DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T03:45:14,188 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T03:45:14,193 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T03:45:14,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:45:14,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:45:14,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:14,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:14,205 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6f1b912b0816,40143,1733456712663, sessionid=0x101aa07e3bf0000, setting cluster-up flag (Was=false) 2024-12-06T03:45:14,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:14,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:14,393 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T03:45:14,395 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,40143,1733456712663 2024-12-06T03:45:14,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:14,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:14,443 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T03:45:14,445 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,40143,1733456712663 2024-12-06T03:45:14,452 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T03:45:14,469 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(746): ClusterId : 7f394fe1-b67c-44ba-830d-fae4fa10799f 2024-12-06T03:45:14,472 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T03:45:14,488 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T03:45:14,488 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T03:45:14,495 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T03:45:14,496 DEBUG [RS:0;6f1b912b0816:33817 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@352cc204, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:45:14,516 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6f1b912b0816:33817 2024-12-06T03:45:14,519 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T03:45:14,520 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T03:45:14,520 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T03:45:14,523 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,40143,1733456712663 with port=33817, startcode=1733456713370 2024-12-06T03:45:14,535 DEBUG [RS:0;6f1b912b0816:33817 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T03:45:14,538 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T03:45:14,550 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T03:45:14,559 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
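Annotation: the balancer records just above report BaseLoadBalancer slop=0.2 and a StochasticLoadBalancer loaded with maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000. Purely as a hedged sketch, the snippet below shows the configuration keys usually associated with those values; the key names are recalled from hbase-default.xml and should be verified against the exact release:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class BalancerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror the ones printed by StochasticLoadBalancer(272) above.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
    // The BaseLoadBalancer "slop" (0.2 in the log) is commonly controlled by this key.
    conf.setFloat("hbase.regions.slop", 0.2f);
    return conf;
  }
}

The "sum of multiplier of cost functions = 0.0" in the same record simply reflects that no region servers have reported yet, so every cost function still weighs zero at this point in startup.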
2024-12-06T03:45:14,565 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6f1b912b0816,40143,1733456712663 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T03:45:14,577 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:45:14,578 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:45:14,579 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:45:14,579 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:45:14,579 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6f1b912b0816:0, corePoolSize=10, maxPoolSize=10 2024-12-06T03:45:14,579 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,580 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:45:14,580 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,593 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:45:14,593 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T03:45:14,594 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733456744594 2024-12-06T03:45:14,596 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T03:45:14,597 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T03:45:14,601 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T03:45:14,601 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:14,601 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T03:45:14,601 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T03:45:14,602 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T03:45:14,602 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T03:45:14,607 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
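Annotation: the cleaner chores being initialized here (TimeToLiveLogCleaner, ReplicationLogCleaner, the master-local-store WAL cleaner, and the HFile cleaners that appear just below) are pluggable delegates. As a sketch only, with key names recalled from hbase-default.xml and therefore to be double-checked, the plugin chains and old-WAL retention are typically wired like this; the delegate class names are the ones printed in the surrounding records:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CleanerChoreSketch {
  public static Configuration cleaners() {
    Configuration conf = HBaseConfiguration.create();
    // Comma-separated log-cleaner delegates applied to the oldWALs directory.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    // Retention (ms) used by TimeToLiveLogCleaner for archived WALs in this sketch.
    conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
    // Comma-separated HFile-cleaner delegates applied to the archive directory.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
    return conf;
  }
}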
2024-12-06T03:45:14,610 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T03:45:14,611 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T03:45:14,612 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T03:45:14,619 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T03:45:14,619 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T03:45:14,620 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33547, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T03:45:14,628 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40143 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-06T03:45:14,634 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456714621,5,FailOnTimeoutGroup] 2024-12-06T03:45:14,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:45:14,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:45:14,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456714635,5,FailOnTimeoutGroup] 2024-12-06T03:45:14,644 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,644 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-12-06T03:45:14,645 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T03:45:14,645 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,646 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,646 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a 2024-12-06T03:45:14,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:45:14,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:45:14,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:45:14,670 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-06T03:45:14,670 WARN [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
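Annotation: the WARN above shows the region server's reportForDuty being rejected while the master is still initializing (the ServerNotRunningYetException stack trace appears a few records earlier) and retried after a 100 ms sleep; the retry succeeds at 03:45:14,771 below. The pattern is an ordinary catch-sleep-retry loop. A self-contained illustration of that shape, not the actual HRegionServer code and with a stand-in exception type, might look like this:

import java.util.concurrent.Callable;

/** Illustrative retry loop mirroring the reportForDuty behaviour seen in the log. */
public final class RetryUntilMasterRunning {

  /** Stand-in for ServerNotRunningYetException in this sketch. */
  public static class NotRunningYetException extends Exception {
    public NotRunningYetException(String msg) { super(msg); }
  }

  public static <T> T callWithRetry(Callable<T> reportForDuty,
                                    long sleepMillis,
                                    int maxAttempts) throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return reportForDuty.call();
      } catch (NotRunningYetException e) {
        // Same shape as the log: "reportForDuty failed; sleeping 100 ms and then retrying."
        last = e;
        Thread.sleep(sleepMillis);
      }
    }
    if (last != null) {
      throw last;
    }
    throw new IllegalArgumentException("maxAttempts must be >= 1");
  }
}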
2024-12-06T03:45:14,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:45:14,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:45:14,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:14,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:45:14,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:45:14,678 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:45:14,678 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:14,680 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:45:14,680 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:45:14,684 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:45:14,684 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:14,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:45:14,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:45:14,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:45:14,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:14,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:45:14,692 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:45:14,694 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740 2024-12-06T03:45:14,694 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740 2024-12-06T03:45:14,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:45:14,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:45:14,698 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No 
hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:45:14,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:45:14,710 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:45:14,711 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700154, jitterRate=-0.10970833897590637}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:45:14,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733456714668Initializing all the Stores at 1733456714671 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456714671Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456714671Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456714671Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456714671Cleaning up temporary data from old regions at 1733456714697 (+26 ms)Region opened successfully at 1733456714714 (+17 ms) 2024-12-06T03:45:14,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:45:14,714 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:45:14,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:45:14,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:45:14,714 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:45:14,719 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:45:14,719 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: 
Waiting for close lock at 1733456714714Disabling compacts and flushes for region at 1733456714714Disabling writes for close at 1733456714714Writing region close event to WAL at 1733456714718 (+4 ms)Closed at 1733456714719 (+1 ms) 2024-12-06T03:45:14,723 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:45:14,723 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T03:45:14,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T03:45:14,737 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:45:14,740 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T03:45:14,771 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,40143,1733456712663 with port=33817, startcode=1733456713370 2024-12-06T03:45:14,773 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40143 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6f1b912b0816,33817,1733456713370 2024-12-06T03:45:14,776 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40143 {}] master.ServerManager(517): Registering regionserver=6f1b912b0816,33817,1733456713370 2024-12-06T03:45:14,783 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a 2024-12-06T03:45:14,783 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37783 2024-12-06T03:45:14,783 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T03:45:14,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:45:14,811 DEBUG [RS:0;6f1b912b0816:33817 {}] zookeeper.ZKUtil(111): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6f1b912b0816,33817,1733456713370 2024-12-06T03:45:14,811 WARN [RS:0;6f1b912b0816:33817 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
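Annotation: once registration succeeds, the master hands a few settings back to RS:0, logged above as "Config from master": hbase.rootdir, fs.defaultFS, and hbase.master.info.port=-1 (which disables the master info web UI). A minimal sketch showing those same keys on a Configuration object; the values are copied verbatim from the log and the HDFS path is test-specific:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class ConfigFromMasterSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The same three keys the master handed to RS:0 in the log.
    conf.set("hbase.rootdir",
        "hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a");
    conf.set("fs.defaultFS", "hdfs://localhost:37783");
    conf.setInt("hbase.master.info.port", -1); // -1 disables the master info server

    System.out.println("rootdir   = " + conf.get("hbase.rootdir"));
    System.out.println("defaultFS = " + conf.get("fs.defaultFS"));
    System.out.println("info port = " + conf.getInt("hbase.master.info.port", 16010));
  }
}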
2024-12-06T03:45:14,811 INFO [RS:0;6f1b912b0816:33817 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:45:14,811 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370 2024-12-06T03:45:14,815 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6f1b912b0816,33817,1733456713370] 2024-12-06T03:45:14,843 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T03:45:14,860 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T03:45:14,866 INFO [RS:0;6f1b912b0816:33817 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:45:14,866 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,871 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T03:45:14,878 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T03:45:14,880 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,880 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,880 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,880 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,881 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,881 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,881 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:45:14,881 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,881 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,881 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6f1b912b0816:0, corePoolSize=1, 
maxPoolSize=1 2024-12-06T03:45:14,882 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,882 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,882 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:45:14,882 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:45:14,882 DEBUG [RS:0;6f1b912b0816:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:45:14,884 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,884 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,884 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,884 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,884 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,884 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33817,1733456713370-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:45:14,891 WARN [6f1b912b0816:40143 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-06T03:45:14,901 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T03:45:14,903 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33817,1733456713370-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,904 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:14,904 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.Replication(171): 6f1b912b0816,33817,1733456713370 started 2024-12-06T03:45:14,921 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
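Annotation: two of the records above carry simple derived numbers. MemStoreFlusher reports globalMemStoreLimit=880 M with a low-water mark of 836 M, which is 880 M x 0.95, and PressureAwareCompactionThroughputController reports a 50 MB/s to 100 MB/s band. As a hedged sketch (the key names are the commonly documented ones and the ~2.2 GB heap figure is an inference from 880 M / 0.4, both worth verifying for this build), those figures usually trace back to settings like these:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MemstoreAndCompactionThroughputSketch {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    // Global memstore limit as a fraction of heap; 0.4 of a roughly 2.2 GB test heap ~= 880 MB.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the global limit: 880 MB * 0.95 = 836 MB.
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // PressureAwareCompactionThroughputController bounds from the log, in bytes/second.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}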
2024-12-06T03:45:14,921 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1482): Serving as 6f1b912b0816,33817,1733456713370, RpcServer on 6f1b912b0816/172.17.0.2:33817, sessionid=0x101aa07e3bf0001 2024-12-06T03:45:14,922 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T03:45:14,922 DEBUG [RS:0;6f1b912b0816:33817 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6f1b912b0816,33817,1733456713370 2024-12-06T03:45:14,922 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,33817,1733456713370' 2024-12-06T03:45:14,922 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T03:45:14,924 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T03:45:14,924 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T03:45:14,924 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T03:45:14,925 DEBUG [RS:0;6f1b912b0816:33817 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6f1b912b0816,33817,1733456713370 2024-12-06T03:45:14,925 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,33817,1733456713370' 2024-12-06T03:45:14,925 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T03:45:14,925 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T03:45:14,926 DEBUG [RS:0;6f1b912b0816:33817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T03:45:14,926 INFO [RS:0;6f1b912b0816:33817 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T03:45:14,926 INFO [RS:0;6f1b912b0816:33817 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
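The regionserver registers itself as a ZooKeeper procedure member for flush-table-proc and online-snapshot, which are the server-side halves of the client-facing flush and snapshot operations. A sketch of the snapshot side, assuming the table created later in this run and an illustrative snapshot name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Coordinated through the 'online-snapshot' znodes the regionserver just registered under.
          admin.snapshot("example-snapshot", TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
        }
      }
    }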
2024-12-06T03:45:15,041 INFO [RS:0;6f1b912b0816:33817 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C33817%2C1733456713370, suffix=, logDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370, archiveDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs, maxLogs=32 2024-12-06T03:45:15,044 INFO [RS:0;6f1b912b0816:33817 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456715044 2024-12-06T03:45:15,052 INFO [RS:0;6f1b912b0816:33817 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456715044 2024-12-06T03:45:15,055 DEBUG [RS:0;6f1b912b0816:33817 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39853:39853),(127.0.0.1/127.0.0.1:40539:40539)] 2024-12-06T03:45:15,143 DEBUG [6f1b912b0816:40143 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T03:45:15,155 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6f1b912b0816,33817,1733456713370 2024-12-06T03:45:15,161 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,33817,1733456713370, state=OPENING 2024-12-06T03:45:15,218 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T03:45:15,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:15,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:45:15,228 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:45:15,228 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:45:15,230 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:45:15,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,33817,1733456713370}] 2024-12-06T03:45:15,412 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T03:45:15,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52147, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T03:45:15,425 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T03:45:15,425 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:45:15,429 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C33817%2C1733456713370.meta, suffix=.meta, logDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370, archiveDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs, maxLogs=32 2024-12-06T03:45:15,430 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.meta.1733456715430.meta 2024-12-06T03:45:15,437 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.meta.1733456715430.meta 2024-12-06T03:45:15,438 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39853:39853),(127.0.0.1/127.0.0.1:40539:40539)] 2024-12-06T03:45:15,439 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:45:15,441 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T03:45:15,443 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T03:45:15,446 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
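The WAL entries above show FSHLogProvider being instantiated with blocksize=256 MB, rollsize=128 MB and maxLogs=32 for both the default WAL and the meta WAL. One way to arrive at those numbers from configuration, with the property names assumed from HBase 2.x and therefore worth double-checking:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walTuned() {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects FSHLogProvider, the provider named in the log above.
        conf.set("hbase.wal.provider", "filesystem");
        // Roll size is block size times the multiplier: 256 MB * 0.5 = 128 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Cap on un-archived WAL files before flushes are forced (maxLogs=32).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }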
2024-12-06T03:45:15,450 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T03:45:15,450 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:45:15,450 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T03:45:15,450 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T03:45:15,453 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:45:15,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:45:15,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:15,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:45:15,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:45:15,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:45:15,457 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:15,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:45:15,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:45:15,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:45:15,460 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:15,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:45:15,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:45:15,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:45:15,462 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:15,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
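The CompactionConfiguration dump repeated for each column family of hbase:meta (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter, ExploringCompactionPolicy) maps onto the standard compaction tuning properties. A hedged sketch, with property names assumed rather than taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration compactionTuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }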
2024-12-06T03:45:15,463 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:45:15,464 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740 2024-12-06T03:45:15,466 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740 2024-12-06T03:45:15,469 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:45:15,469 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:45:15,470 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:45:15,472 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:45:15,473 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835261, jitterRate=0.0620894730091095}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:45:15,473 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T03:45:15,474 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733456715451Writing region info on filesystem at 1733456715451Initializing all the Stores at 1733456715453 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456715453Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456715453Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456715453Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456715453Cleaning up temporary data from old regions at 1733456715469 (+16 ms)Running coprocessor post-open hooks at 1733456715473 (+4 ms)Region opened successfully at 1733456715474 (+1 ms) 2024-12-06T03:45:15,480 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733456715405 2024-12-06T03:45:15,489 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T03:45:15,489 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T03:45:15,491 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,33817,1733456713370 2024-12-06T03:45:15,492 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,33817,1733456713370, state=OPEN 2024-12-06T03:45:15,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:45:15,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:45:15,578 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:45:15,578 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:45:15,578 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6f1b912b0816,33817,1733456713370 2024-12-06T03:45:15,586 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T03:45:15,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,33817,1733456713370 in 347 msec 2024-12-06T03:45:15,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T03:45:15,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 859 msec 2024-12-06T03:45:15,595 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:45:15,595 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T03:45:15,612 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:45:15,613 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,33817,1733456713370, seqNum=-1] 2024-12-06T03:45:15,629 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:45:15,631 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51677, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:45:15,649 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1590 sec 2024-12-06T03:45:15,649 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733456715649, completionTime=-1 2024-12-06T03:45:15,652 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T03:45:15,652 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-06T03:45:15,676 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-06T03:45:15,676 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733456775676 2024-12-06T03:45:15,676 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733456835676 2024-12-06T03:45:15,676 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 24 msec 2024-12-06T03:45:15,680 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,40143,1733456712663-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:15,680 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,40143,1733456712663-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:15,680 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,40143,1733456712663-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:15,681 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6f1b912b0816:40143, period=300000, unit=MILLISECONDS is enabled. 
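InitMetaProcedure finishes by creating the default and hbase namespaces. From a client, the same namespace catalogue can be listed or extended through the Admin API; a minimal sketch (the example_ns name is illustrative, not part of this run):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // 'default' and 'hbase' were just created by InitMetaProcedure.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
          // A user namespace would be added the same way.
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }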
2024-12-06T03:45:15,682 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:15,682 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T03:45:15,687 DEBUG [master/6f1b912b0816:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T03:45:15,713 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.177sec 2024-12-06T03:45:15,714 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T03:45:15,715 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T03:45:15,716 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T03:45:15,717 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T03:45:15,717 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T03:45:15,718 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,40143,1733456712663-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:45:15,718 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,40143,1733456712663-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T03:45:15,727 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T03:45:15,729 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T03:45:15,730 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,40143,1733456712663-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
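With master initialization complete (2.177 sec in this run) the cluster is queryable from a client. A small sketch that reads back the state the log has been describing, one active master and one live regionserver:

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master      = " + metrics.getMasterName());
          System.out.println("live regionservers = " + metrics.getLiveServerMetrics().size());
        }
      }
    }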
2024-12-06T03:45:15,779 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a88365d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:45:15,782 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-06T03:45:15,782 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-06T03:45:15,785 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6f1b912b0816,40143,-1 for getting cluster id 2024-12-06T03:45:15,787 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T03:45:15,795 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7f394fe1-b67c-44ba-830d-fae4fa10799f' 2024-12-06T03:45:15,797 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T03:45:15,797 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7f394fe1-b67c-44ba-830d-fae4fa10799f" 2024-12-06T03:45:15,798 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fa794bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:45:15,798 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6f1b912b0816,40143,-1] 2024-12-06T03:45:15,800 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T03:45:15,802 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:45:15,804 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58570, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-06T03:45:15,807 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1637bc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:45:15,808 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:45:15,815 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,33817,1733456713370, seqNum=-1] 2024-12-06T03:45:15,815 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:45:15,818 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55718, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:45:15,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=6f1b912b0816,40143,1733456712663 2024-12-06T03:45:15,840 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:45:15,846 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-06T03:45:15,850 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-06T03:45:15,854 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 6f1b912b0816,40143,1733456712663 2024-12-06T03:45:15,856 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@54432716 2024-12-06T03:45:15,857 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T03:45:15,859 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58578, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T03:45:15,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40143 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T03:45:15,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40143 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
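The two TableDescriptorChecker warnings are expected here: the test shrinks MAX_FILESIZE to 786432 bytes and MEMSTORE_FLUSHSIZE to 8192 bytes, presumably so that flushes and rolls happen quickly during the run. Whether the test sets them per table or through hbase.hregion.max.filesize and hbase.hregion.memstore.flush.size in the site configuration is not visible in this log; the per-table variant would look roughly like this sketch:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallRegionDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Deliberately tiny values, matching the warnings above; production
            // tables would normally keep the defaults.
            .setMaxFileSize(786432L)       // MAX_FILESIZE
            .setMemStoreFlushSize(8192L)   // MEMSTORE_FLUSHSIZE
            .build();
      }
    }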
2024-12-06T03:45:15,866 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40143 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:45:15,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40143 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-06T03:45:15,891 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T03:45:15,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40143 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-06T03:45:15,893 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:15,896 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T03:45:15,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40143 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T03:45:15,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741835_1011 (size=389) 2024-12-06T03:45:15,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741835_1011 (size=389) 2024-12-06T03:45:16,340 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 89628604b4aad32f600e933e1f735828, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a 2024-12-06T03:45:16,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741836_1012 (size=72) 2024-12-06T03:45:16,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741836_1012 (size=72) 2024-12-06T03:45:16,357 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:45:16,357 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 89628604b4aad32f600e933e1f735828, disabling compactions & flushes 2024-12-06T03:45:16,357 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:45:16,358 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:45:16,358 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. after waiting 0 ms 2024-12-06T03:45:16,358 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:45:16,358 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:45:16,358 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 89628604b4aad32f600e933e1f735828: Waiting for close lock at 1733456716357Disabling compacts and flushes for region at 1733456716357Disabling writes for close at 1733456716358 (+1 ms)Writing region close event to WAL at 1733456716358Closed at 1733456716358 2024-12-06T03:45:16,360 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T03:45:16,364 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733456716360"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733456716360"}]},"ts":"1733456716360"} 2024-12-06T03:45:16,369 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-06T03:45:16,371 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T03:45:16,373 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456716371"}]},"ts":"1733456716371"} 2024-12-06T03:45:16,377 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-06T03:45:16,379 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=89628604b4aad32f600e933e1f735828, ASSIGN}] 2024-12-06T03:45:16,381 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=89628604b4aad32f600e933e1f735828, ASSIGN 2024-12-06T03:45:16,383 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=89628604b4aad32f600e933e1f735828, ASSIGN; state=OFFLINE, location=6f1b912b0816,33817,1733456713370; forceNewPlan=false, retain=false 2024-12-06T03:45:16,536 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=89628604b4aad32f600e933e1f735828, regionState=OPENING, regionLocation=6f1b912b0816,33817,1733456713370 2024-12-06T03:45:16,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=89628604b4aad32f600e933e1f735828, ASSIGN because future has completed 2024-12-06T03:45:16,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 89628604b4aad32f600e933e1f735828, server=6f1b912b0816,33817,1733456713370}] 2024-12-06T03:45:16,715 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 
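Once the OpenRegionProcedure dispatched above completes, the single region of the new table can be located from any client. A sketch using RegionLocator:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(table)) {
          // One region is expected: empty start and end key, as created above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }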
2024-12-06T03:45:16,716 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 89628604b4aad32f600e933e1f735828, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:45:16,716 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,716 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:45:16,717 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,717 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,719 INFO [StoreOpener-89628604b4aad32f600e933e1f735828-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,722 INFO [StoreOpener-89628604b4aad32f600e933e1f735828-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 89628604b4aad32f600e933e1f735828 columnFamilyName info 2024-12-06T03:45:16,722 DEBUG [StoreOpener-89628604b4aad32f600e933e1f735828-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:45:16,723 INFO [StoreOpener-89628604b4aad32f600e933e1f735828-1 {}] regionserver.HStore(327): Store=89628604b4aad32f600e933e1f735828/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:45:16,723 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,725 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,725 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,726 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,726 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,729 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,733 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:45:16,734 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 89628604b4aad32f600e933e1f735828; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873306, jitterRate=0.11046649515628815}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T03:45:16,734 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:16,735 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 89628604b4aad32f600e933e1f735828: Running coprocessor pre-open hook at 1733456716717Writing region info on filesystem at 1733456716717Initializing all the Stores at 1733456716719 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456716719Cleaning up temporary data from old regions at 1733456716726 (+7 ms)Running coprocessor post-open hooks at 1733456716734 (+8 ms)Region opened successfully at 1733456716735 (+1 ms) 2024-12-06T03:45:16,737 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828., pid=6, masterSystemTime=1733456716703 2024-12-06T03:45:16,741 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:45:16,741 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:45:16,742 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=89628604b4aad32f600e933e1f735828, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,33817,1733456713370 2024-12-06T03:45:16,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 89628604b4aad32f600e933e1f735828, server=6f1b912b0816,33817,1733456713370 because future has completed 2024-12-06T03:45:16,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T03:45:16,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 89628604b4aad32f600e933e1f735828, server=6f1b912b0816,33817,1733456713370 in 200 msec 2024-12-06T03:45:16,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T03:45:16,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=89628604b4aad32f600e933e1f735828, ASSIGN in 373 msec 2024-12-06T03:45:16,758 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T03:45:16,758 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456716758"}]},"ts":"1733456716758"} 2024-12-06T03:45:16,762 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-06T03:45:16,764 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T03:45:16,767 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 895 msec 2024-12-06T03:45:20,966 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T03:45:21,010 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T03:45:21,011 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-06T03:45:22,976 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:45:22,976 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-06T03:45:22,980 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-06T03:45:22,980 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-06T03:45:22,983 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:45:22,983 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-06T03:45:22,984 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T03:45:22,984 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-06T03:45:25,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40143 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T03:45:25,983 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-06T03:45:25,986 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-06T03:45:25,994 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-06T03:45:25,995 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 
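With the CREATE operation reported complete and the single region located, the test begins writing data; the flush that follows shows keys such as row0001 in the info family. A minimal client-side sketch of writes in that shape (qualifier and value payload are assumptions, only the row and family names appear in this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name)) {
          // Each put lands in the region's memstore and produces a WAL entry;
          // roughly 1 KB per row matches the 7.36 KB flushed for 7 entries below.
          for (int i = 1; i <= 7; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]);
            table.put(put);
          }
        }
      }
    }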
2024-12-06T03:45:25,996 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456725996 2024-12-06T03:45:26,005 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:26,005 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:26,005 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:26,005 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:26,005 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:26,006 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456715044 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456725996 2024-12-06T03:45:26,008 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40539:40539),(127.0.0.1/127.0.0.1:39853:39853)] 2024-12-06T03:45:26,008 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456715044 is not closed yet, will try archiving it next time 2024-12-06T03:45:26,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741833_1009 (size=451) 2024-12-06T03:45:26,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741833_1009 (size=451) 2024-12-06T03:45:26,012 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456715044 to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs/6f1b912b0816%2C33817%2C1733456713370.1733456715044 2024-12-06T03:45:26,017 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828., hostname=6f1b912b0816,33817,1733456713370, seqNum=2] 2024-12-06T03:45:38,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33817 {}] regionserver.HRegion(8855): Flush requested on 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:38,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89628604b4aad32f600e933e1f735828 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:45:38,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/e47ab42376d44149887c52424ebc4085 is 1080, key is row0001/info:/1733456726020/Put/seqid=0 2024-12-06T03:45:38,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741838_1014 (size=12509) 2024-12-06T03:45:38,128 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741838_1014 (size=12509) 2024-12-06T03:45:38,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/e47ab42376d44149887c52424ebc4085 2024-12-06T03:45:38,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/e47ab42376d44149887c52424ebc4085 as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/e47ab42376d44149887c52424ebc4085 2024-12-06T03:45:38,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/e47ab42376d44149887c52424ebc4085, entries=7, sequenceid=11, filesize=12.2 K 2024-12-06T03:45:38,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 89628604b4aad32f600e933e1f735828 in 121ms, sequenceid=11, compaction requested=false 2024-12-06T03:45:38,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89628604b4aad32f600e933e1f735828: 2024-12-06T03:45:41,883 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
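The flush above writes the memstore contents to a file under .tmp/ and then "commits" it by moving it into the store's info/ directory, so readers never observe a half-written store file. A small write-then-rename sketch with java.nio is shown below; the paths and file name are illustrative, and the real flush runs against HDFS rather than a local filesystem.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class FlushCommit {
    // Writes data to <store>/.tmp/<name>, then moves it into <store>/info/<name>.
    static Path flushAndCommit(Path storeDir, String fileName, byte[] data) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Path infoDir = storeDir.resolve("info");
        Files.createDirectories(tmpDir);
        Files.createDirectories(infoDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, data);                        // flush: write the new store file aside

        Path committed = infoDir.resolve(fileName);
        // commit: a single rename makes the fully written file visible
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("store");
        Path f = flushAndCommit(store, "e47ab42376d44149887c52424ebc4085",
            "row0001/info".getBytes(StandardCharsets.UTF_8));
        System.out.println("committed " + f + " size=" + Files.size(f));
    }
}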
2024-12-06T03:45:46,082 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456746081 2024-12-06T03:45:46,299 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK]] 2024-12-06T03:45:46,299 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:46,299 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:46,299 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:46,299 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:46,299 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:46,300 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456725996 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456746081 2024-12-06T03:45:46,300 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39853:39853),(127.0.0.1/127.0.0.1:40539:40539)] 2024-12-06T03:45:46,300 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456725996 is not closed yet, will try archiving it next time 2024-12-06T03:45:46,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741837_1013 (size=12399) 2024-12-06T03:45:46,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741837_1013 (size=12399) 2024-12-06T03:45:46,505 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:48,713 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:50,918 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:53,122 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:53,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33817 {}] regionserver.HRegion(8855): Flush requested on 89628604b4aad32f600e933e1f735828 2024-12-06T03:45:53,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89628604b4aad32f600e933e1f735828 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:45:53,324 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:53,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/317e7c5774454df3a538df4b56f3295c is 1080, key is row0008/info:/1733456740064/Put/seqid=0 2024-12-06T03:45:53,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741840_1016 (size=12509) 2024-12-06T03:45:53,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741840_1016 (size=12509) 2024-12-06T03:45:53,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/317e7c5774454df3a538df4b56f3295c 2024-12-06T03:45:53,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/317e7c5774454df3a538df4b56f3295c as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/317e7c5774454df3a538df4b56f3295c 2024-12-06T03:45:53,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/317e7c5774454df3a538df4b56f3295c, entries=7, sequenceid=21, filesize=12.2 K 2024-12-06T03:45:53,583 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:53,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 89628604b4aad32f600e933e1f735828 in 
461ms, sequenceid=21, compaction requested=false 2024-12-06T03:45:53,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89628604b4aad32f600e933e1f735828: 2024-12-06T03:45:53,584 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-06T03:45:53,584 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:45:53,585 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/e47ab42376d44149887c52424ebc4085 because midkey is the same as first or last row 2024-12-06T03:45:55,327 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:55,731 INFO [master/6f1b912b0816:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T03:45:55,731 INFO [master/6f1b912b0816:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-06T03:45:57,531 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:57,533 WARN [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:57,534 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C33817%2C1733456713370:(num 1733456746081) roll requested 2024-12-06T03:45:57,535 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456757534 2024-12-06T03:45:57,742 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:45:57,742 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:57,743 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:57,743 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:57,743 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:45:57,743 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
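The run above records repeated "Slow sync cost: ~201 ms" entries and then requests a WAL roll once too many slow syncs accumulate (the log reports count=8 against threshold=5); later in the test a single sync above 5000 ms triggers a roll on its own. The sketch below illustrates both triggers with the thresholds taken from these log lines; the class, field names, and the exact counting/reset behaviour are assumptions, not HBase's implementation.

public class SlowSyncRollPolicy {
    private final long slowSyncMs;        // a sync slower than this counts as "slow" (e.g. 200 ms)
    private final int slowSyncRollCount;  // request a roll after this many slow syncs (log: threshold=5)
    private final long rollOnSyncMs;      // roll immediately if one sync exceeds this (log: 5000 ms)
    private int slowSyncs;

    SlowSyncRollPolicy(long slowSyncMs, int slowSyncRollCount, long rollOnSyncMs) {
        this.slowSyncMs = slowSyncMs;
        this.slowSyncRollCount = slowSyncRollCount;
        this.rollOnSyncMs = rollOnSyncMs;
    }

    /** Returns true if the WAL should be rolled after a sync that took syncCostMs. */
    boolean onSyncCompleted(long syncCostMs) {
        if (syncCostMs >= rollOnSyncMs) {
            return true;                  // single very slow sync, e.g. "time=5006 ms, threshold=5000 ms"
        }
        if (syncCostMs >= slowSyncMs && ++slowSyncs > slowSyncRollCount) {
            slowSyncs = 0;
            return true;                  // accumulated slow syncs, e.g. "count=8, threshold=5"
        }
        return false;
    }

    public static void main(String[] args) {
        SlowSyncRollPolicy policy = new SlowSyncRollPolicy(200, 5, 5000);
        long[] costs = {201, 202, 201, 240, 201, 201, 201, 205};
        for (long c : costs) {
            System.out.println("sync " + c + " ms -> roll=" + policy.onSyncCompleted(c));
        }
    }
}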
2024-12-06T03:45:57,743 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456746081 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456757534 2024-12-06T03:45:57,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741839_1015 (size=7739) 2024-12-06T03:45:57,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741839_1015 (size=7739) 2024-12-06T03:45:57,749 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39853:39853),(127.0.0.1/127.0.0.1:40539:40539)] 2024-12-06T03:45:57,749 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456746081 is not closed yet, will try archiving it next time 2024-12-06T03:45:57,749 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456725996 to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs/6f1b912b0816%2C33817%2C1733456713370.1733456725996 2024-12-06T03:45:59,736 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:01,717 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 89628604b4aad32f600e933e1f735828, had cached 0 bytes from a total of 25018 2024-12-06T03:46:01,942 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:04,187 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 240 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:06,391 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:08,393 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T03:46:08,393 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456768393 2024-12-06T03:46:11,883 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T03:46:13,403 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:13,405 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:13,405 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C33817%2C1733456713370:(num 1733456768393) roll requested 2024-12-06T03:46:13,405 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:13,406 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:13,406 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:13,406 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:13,406 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:13,406 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456757534 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456768393 2024-12-06T03:46:13,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741841_1017 (size=4753) 2024-12-06T03:46:13,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741841_1017 (size=4753) 2024-12-06T03:46:13,418 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39853:39853),(127.0.0.1/127.0.0.1:40539:40539)] 2024-12-06T03:46:13,418 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456757534 is not closed yet, will try archiving it next time 2024-12-06T03:46:13,418 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456773418 2024-12-06T03:46:18,421 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:18,422 WARN [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:18,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33817 {}] regionserver.HRegion(8855): Flush requested on 89628604b4aad32f600e933e1f735828 2024-12-06T03:46:18,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89628604b4aad32f600e933e1f735828 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:46:18,428 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:18,429 WARN [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:20,423 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T03:46:23,426 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:23,427 WARN [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK]] 2024-12-06T03:46:23,427 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:23,428 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:23,428 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:23,429 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:23,429 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:23,430 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456768393 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456773418 2024-12-06T03:46:23,433 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40539:40539),(127.0.0.1/127.0.0.1:39853:39853)] 2024-12-06T03:46:23,433 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456768393 is not closed yet, will try archiving it next time 2024-12-06T03:46:23,433 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C33817%2C1733456713370:(num 1733456773418) roll requested 2024-12-06T03:46:23,434 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456783433 2024-12-06T03:46:23,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741842_1018 (size=1569) 2024-12-06T03:46:23,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741842_1018 (size=1569) 2024-12-06T03:46:23,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/53b20a2dd3ed4f02af02bf30d7348b8d is 1080, key is row0015/info:/1733456755124/Put/seqid=0 2024-12-06T03:46:23,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741844_1020 (size=12509) 2024-12-06T03:46:23,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741844_1020 (size=12509) 2024-12-06T03:46:23,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/53b20a2dd3ed4f02af02bf30d7348b8d 2024-12-06T03:46:23,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/53b20a2dd3ed4f02af02bf30d7348b8d as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/53b20a2dd3ed4f02af02bf30d7348b8d 2024-12-06T03:46:23,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/53b20a2dd3ed4f02af02bf30d7348b8d, entries=7, sequenceid=31, filesize=12.2 K 2024-12-06T03:46:28,446 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK]] 2024-12-06T03:46:28,446 WARN [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK]] 2024-12-06T03:46:28,465 INFO [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK]] 2024-12-06T03:46:28,465 WARN [FSHLog-0-hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a-prefix:6f1b912b0816,33817,1733456713370 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38091,DS-de658b84-3f61-4eab-a78c-88ec3e8978af,DISK], DatanodeInfoWithStorage[127.0.0.1:33917,DS-072825f3-d731-4995-a8ee-3bac2d033e4a,DISK]] 2024-12-06T03:46:28,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 89628604b4aad32f600e933e1f735828 in 10043ms, sequenceid=31, compaction requested=true 2024-12-06T03:46:28,465 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89628604b4aad32f600e933e1f735828: 2024-12-06T03:46:28,465 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,465 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,466 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-06T03:46:28,466 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:46:28,466 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,466 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/e47ab42376d44149887c52424ebc4085 because midkey is the same as first or last row 2024-12-06T03:46:28,466 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,466 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456773418 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456783433 2024-12-06T03:46:28,468 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:40539:40539),(127.0.0.1/127.0.0.1:39853:39853)] 2024-12-06T03:46:28,468 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456773418 is not closed yet, will try archiving it next time 2024-12-06T03:46:28,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 89628604b4aad32f600e933e1f735828:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:46:28,469 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456746081 to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs/6f1b912b0816%2C33817%2C1733456713370.1733456746081 2024-12-06T03:46:28,469 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C33817%2C1733456713370:(num 1733456788469) roll requested 2024-12-06T03:46:28,469 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456788469 2024-12-06T03:46:28,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741843_1019 (size=438) 2024-12-06T03:46:28,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:46:28,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741843_1019 (size=438) 2024-12-06T03:46:28,473 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456757534 to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs/6f1b912b0816%2C33817%2C1733456713370.1733456757534 2024-12-06T03:46:28,473 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:46:28,474 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456768393 to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs/6f1b912b0816%2C33817%2C1733456713370.1733456768393 2024-12-06T03:46:28,476 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456773418 to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs/6f1b912b0816%2C33817%2C1733456713370.1733456773418 2024-12-06T03:46:28,476 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-12-06T03:46:28,478 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.HStore(1541): 89628604b4aad32f600e933e1f735828/info is initiating minor compaction (all files) 2024-12-06T03:46:28,478 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,478 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,478 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,479 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,479 INFO [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 89628604b4aad32f600e933e1f735828/info in TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:46:28,479 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,479 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456783433 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456788469 2024-12-06T03:46:28,479 INFO [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/e47ab42376d44149887c52424ebc4085, hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/317e7c5774454df3a538df4b56f3295c, hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/53b20a2dd3ed4f02af02bf30d7348b8d] into tmpdir=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp, totalSize=36.6 K 2024-12-06T03:46:28,480 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] compactions.Compactor(225): Compacting e47ab42376d44149887c52424ebc4085, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733456726020 2024-12-06T03:46:28,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741845_1021 (size=93) 2024-12-06T03:46:28,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741845_1021 (size=93) 2024-12-06T03:46:28,482 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 317e7c5774454df3a538df4b56f3295c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733456740064 2024-12-06T03:46:28,482 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456783433 to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs/6f1b912b0816%2C33817%2C1733456713370.1733456783433 2024-12-06T03:46:28,483 DEBUG 
[RS:0;6f1b912b0816:33817-shortCompactions-0 {}] compactions.Compactor(225): Compacting 53b20a2dd3ed4f02af02bf30d7348b8d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733456755124 2024-12-06T03:46:28,487 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40539:40539),(127.0.0.1/127.0.0.1:39853:39853)] 2024-12-06T03:46:28,487 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33817%2C1733456713370.1733456788487 2024-12-06T03:46:28,495 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,495 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,495 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,495 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,495 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:28,496 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456788469 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456788487 2024-12-06T03:46:28,497 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39853:39853),(127.0.0.1/127.0.0.1:40539:40539)] 2024-12-06T03:46:28,497 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/WALs/6f1b912b0816,33817,1733456713370/6f1b912b0816%2C33817%2C1733456713370.1733456788469 is not closed yet, will try archiving it next time 2024-12-06T03:46:28,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741846_1022 (size=1258) 2024-12-06T03:46:28,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741846_1022 (size=1258) 2024-12-06T03:46:28,512 INFO [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 89628604b4aad32f600e933e1f735828#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:46:28,513 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/1261347fb20648348c03f3ffe5cdc2af is 1080, key is row0001/info:/1733456726020/Put/seqid=0 2024-12-06T03:46:28,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741848_1024 (size=27710) 2024-12-06T03:46:28,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741848_1024 (size=27710) 2024-12-06T03:46:28,536 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/1261347fb20648348c03f3ffe5cdc2af as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/1261347fb20648348c03f3ffe5cdc2af 2024-12-06T03:46:28,556 INFO [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 89628604b4aad32f600e933e1f735828/info of 89628604b4aad32f600e933e1f735828 into 1261347fb20648348c03f3ffe5cdc2af(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T03:46:28,556 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 89628604b4aad32f600e933e1f735828: 2024-12-06T03:46:28,558 INFO [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828., storeName=89628604b4aad32f600e933e1f735828/info, priority=13, startTime=1733456788468; duration=0sec 2024-12-06T03:46:28,559 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-06T03:46:28,559 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:46:28,559 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/1261347fb20648348c03f3ffe5cdc2af because midkey is the same as first or last row 2024-12-06T03:46:28,559 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-06T03:46:28,559 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:46:28,559 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/1261347fb20648348c03f3ffe5cdc2af because midkey is the same as first or last row 2024-12-06T03:46:28,560 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-06T03:46:28,560 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:46:28,560 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/1261347fb20648348c03f3ffe5cdc2af because midkey is the same as first or last row 2024-12-06T03:46:28,560 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:46:28,560 DEBUG [RS:0;6f1b912b0816:33817-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 89628604b4aad32f600e933e1f735828:info 2024-12-06T03:46:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33817 {}] regionserver.HRegion(8855): Flush requested on 89628604b4aad32f600e933e1f735828 2024-12-06T03:46:40,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 89628604b4aad32f600e933e1f735828 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:46:40,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/ac3b5cf0a91340cb8036cb99018914ff is 1080, key is row0022/info:/1733456788488/Put/seqid=0 2024-12-06T03:46:40,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741849_1025 (size=12509) 2024-12-06T03:46:40,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741849_1025 (size=12509) 2024-12-06T03:46:40,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/ac3b5cf0a91340cb8036cb99018914ff 2024-12-06T03:46:40,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/ac3b5cf0a91340cb8036cb99018914ff as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/ac3b5cf0a91340cb8036cb99018914ff 2024-12-06T03:46:40,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/ac3b5cf0a91340cb8036cb99018914ff, entries=7, sequenceid=42, filesize=12.2 K 2024-12-06T03:46:40,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 89628604b4aad32f600e933e1f735828 in 44ms, sequenceid=42, compaction requested=false 2024-12-06T03:46:40,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 89628604b4aad32f600e933e1f735828: 2024-12-06T03:46:40,567 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-06T03:46:40,567 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:46:40,568 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/1261347fb20648348c03f3ffe5cdc2af because midkey is the same as first or last row 2024-12-06T03:46:41,883 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T03:46:46,717 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 89628604b4aad32f600e933e1f735828, had cached 0 bytes from a total of 40219 2024-12-06T03:46:48,537 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T03:46:48,537 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
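Each flush above ends with the same pair of split-policy checks: the store is judged "big enough" (sumSize vs sizeToCheck=16.0 K), yet the region still cannot be split because the candidate midkey equals the first or last row. The standalone sketch below shows that two-step decision in plain Java; the row values and method names are illustrative, and the real logic lives in the region split policy and StoreUtils classes named in the log.

import java.util.Arrays;
import java.util.List;

public class SplitDecision {
    /** Size check: the summed store file size must exceed the configured threshold. */
    static boolean bigEnough(List<Long> storeFileSizes, long sizeToCheck) {
        long sum = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        return sum > sizeToCheck;                 // e.g. sumSize=39.3 K vs sizeToCheck=16.0 K
    }

    /** Midkey check: splitting at the first or last row would leave one daughter empty. */
    static boolean splittable(String firstRow, String midKey, String lastRow) {
        return !midKey.equals(firstRow) && !midKey.equals(lastRow);
    }

    public static void main(String[] args) {
        List<Long> sizes = Arrays.asList(12509L, 27710L);   // ~39.3 K total, as in the log
        boolean big = bigEnough(sizes, 16 * 1024L);
        boolean canSplit = splittable("row0001", "row0001", "row0029");  // midkey == first row
        System.out.println("bigEnough=" + big + " splittable=" + canSplit);
        // Mirrors the log: "Should split because region size is big enough" followed by
        // "cannot split ... because midkey is the same as first or last row".
    }
}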
2024-12-06T03:46:48,537 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:46:48,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:48,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:48,544 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-06T03:46:48,544 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T03:46:48,544 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=523817283, stopped=false 2024-12-06T03:46:48,544 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6f1b912b0816,40143,1733456712663 2024-12-06T03:46:48,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:46:48,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:46:48,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:48,598 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:46:48,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:48,598 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T03:46:48,599 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:46:48,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:48,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:46:48,599 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6f1b912b0816,33817,1733456713370' ***** 2024-12-06T03:46:48,600 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T03:46:48,600 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:46:48,600 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T03:46:48,600 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T03:46:48,601 INFO [RS:0;6f1b912b0816:33817 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T03:46:48,601 INFO [RS:0;6f1b912b0816:33817 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T03:46:48,601 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(3091): Received CLOSE for 89628604b4aad32f600e933e1f735828 2024-12-06T03:46:48,602 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(959): stopping server 6f1b912b0816,33817,1733456713370 2024-12-06T03:46:48,602 INFO [RS:0;6f1b912b0816:33817 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:46:48,602 INFO [RS:0;6f1b912b0816:33817 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6f1b912b0816:33817. 
2024-12-06T03:46:48,603 DEBUG [RS:0;6f1b912b0816:33817 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:46:48,603 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 89628604b4aad32f600e933e1f735828, disabling compactions & flushes 2024-12-06T03:46:48,603 DEBUG [RS:0;6f1b912b0816:33817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:48,603 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:46:48,603 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:46:48,603 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. after waiting 0 ms 2024-12-06T03:46:48,603 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T03:46:48,603 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:46:48,603 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T03:46:48,603 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-06T03:46:48,603 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T03:46:48,603 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 89628604b4aad32f600e933e1f735828 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-06T03:46:48,604 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-06T03:46:48,604 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 89628604b4aad32f600e933e1f735828=TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.} 2024-12-06T03:46:48,604 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:46:48,604 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:46:48,605 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:46:48,605 DEBUG [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 89628604b4aad32f600e933e1f735828 2024-12-06T03:46:48,605 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:46:48,605 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:46:48,605 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-06T03:46:48,610 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/b931220ea5954ebf8322376cbeb61c00 is 1080, key is row0029/info:/1733456802526/Put/seqid=0 2024-12-06T03:46:48,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741850_1026 (size=8193) 2024-12-06T03:46:48,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741850_1026 (size=8193) 2024-12-06T03:46:48,617 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/b931220ea5954ebf8322376cbeb61c00 2024-12-06T03:46:48,627 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/.tmp/info/b931220ea5954ebf8322376cbeb61c00 as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/b931220ea5954ebf8322376cbeb61c00 2024-12-06T03:46:48,628 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/info/0540349496d849a3bb449a7afca3b160 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828./info:regioninfo/1733456716742/Put/seqid=0 2024-12-06T03:46:48,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741851_1027 (size=7016) 2024-12-06T03:46:48,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741851_1027 (size=7016) 2024-12-06T03:46:48,634 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/info/0540349496d849a3bb449a7afca3b160 2024-12-06T03:46:48,636 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/b931220ea5954ebf8322376cbeb61c00, entries=3, sequenceid=48, filesize=8.0 K 2024-12-06T03:46:48,638 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 89628604b4aad32f600e933e1f735828 in 35ms, sequenceid=48, compaction requested=true 2024-12-06T03:46:48,638 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/e47ab42376d44149887c52424ebc4085, hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/317e7c5774454df3a538df4b56f3295c, hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/53b20a2dd3ed4f02af02bf30d7348b8d] to archive 2024-12-06T03:46:48,641 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T03:46:48,645 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/e47ab42376d44149887c52424ebc4085 to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/e47ab42376d44149887c52424ebc4085 2024-12-06T03:46:48,648 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/317e7c5774454df3a538df4b56f3295c to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/317e7c5774454df3a538df4b56f3295c 2024-12-06T03:46:48,650 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/53b20a2dd3ed4f02af02bf30d7348b8d to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/info/53b20a2dd3ed4f02af02bf30d7348b8d 2024-12-06T03:46:48,660 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/ns/1632645c89e048dab9377344affb3bfd is 43, key is default/ns:d/1733456715634/Put/seqid=0 2024-12-06T03:46:48,662 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6f1b912b0816:40143 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-06T03:46:48,664 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e47ab42376d44149887c52424ebc4085=12509, 317e7c5774454df3a538df4b56f3295c=12509, 53b20a2dd3ed4f02af02bf30d7348b8d=12509] 2024-12-06T03:46:48,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741852_1028 (size=5153) 2024-12-06T03:46:48,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741852_1028 (size=5153) 2024-12-06T03:46:48,670 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/ns/1632645c89e048dab9377344affb3bfd 2024-12-06T03:46:48,671 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/default/TestLogRolling-testSlowSyncLogRolling/89628604b4aad32f600e933e1f735828/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-06T03:46:48,674 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 2024-12-06T03:46:48,674 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 89628604b4aad32f600e933e1f735828: Waiting for close lock at 1733456808602Running coprocessor pre-close hooks at 1733456808603 (+1 ms)Disabling compacts and flushes for region at 1733456808603Disabling writes for close at 1733456808603Obtaining lock to block concurrent updates at 1733456808604 (+1 ms)Preparing flush snapshotting stores in 89628604b4aad32f600e933e1f735828 at 1733456808604Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733456808604Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. at 1733456808605 (+1 ms)Flushing 89628604b4aad32f600e933e1f735828/info: creating writer at 1733456808605Flushing 89628604b4aad32f600e933e1f735828/info: appending metadata at 1733456808609 (+4 ms)Flushing 89628604b4aad32f600e933e1f735828/info: closing flushed file at 1733456808609Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b4d9a33: reopening flushed file at 1733456808626 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 89628604b4aad32f600e933e1f735828 in 35ms, sequenceid=48, compaction requested=true at 1733456808638 (+12 ms)Writing region close event to WAL at 1733456808665 (+27 ms)Running coprocessor post-close hooks at 1733456808672 (+7 ms)Closed at 1733456808674 (+2 ms) 2024-12-06T03:46:48,675 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733456715861.89628604b4aad32f600e933e1f735828. 
2024-12-06T03:46:48,693 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/table/1f9f626236e849e48c2006783688f344 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733456716758/Put/seqid=0 2024-12-06T03:46:48,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741853_1029 (size=5396) 2024-12-06T03:46:48,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741853_1029 (size=5396) 2024-12-06T03:46:48,700 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/table/1f9f626236e849e48c2006783688f344 2024-12-06T03:46:48,710 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/info/0540349496d849a3bb449a7afca3b160 as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/info/0540349496d849a3bb449a7afca3b160 2024-12-06T03:46:48,718 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/info/0540349496d849a3bb449a7afca3b160, entries=10, sequenceid=11, filesize=6.9 K 2024-12-06T03:46:48,720 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/ns/1632645c89e048dab9377344affb3bfd as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/ns/1632645c89e048dab9377344affb3bfd 2024-12-06T03:46:48,731 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/ns/1632645c89e048dab9377344affb3bfd, entries=2, sequenceid=11, filesize=5.0 K 2024-12-06T03:46:48,732 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/.tmp/table/1f9f626236e849e48c2006783688f344 as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/table/1f9f626236e849e48c2006783688f344 2024-12-06T03:46:48,741 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/table/1f9f626236e849e48c2006783688f344, entries=2, sequenceid=11, filesize=5.3 K 2024-12-06T03:46:48,743 INFO 
[RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-12-06T03:46:48,749 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-06T03:46:48,750 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:46:48,750 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:46:48,750 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456808604Running coprocessor pre-close hooks at 1733456808604Disabling compacts and flushes for region at 1733456808604Disabling writes for close at 1733456808605 (+1 ms)Obtaining lock to block concurrent updates at 1733456808605Preparing flush snapshotting stores in 1588230740 at 1733456808605Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733456808606 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733456808606Flushing 1588230740/info: creating writer at 1733456808607 (+1 ms)Flushing 1588230740/info: appending metadata at 1733456808627 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733456808627Flushing 1588230740/ns: creating writer at 1733456808643 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733456808659 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733456808659Flushing 1588230740/table: creating writer at 1733456808679 (+20 ms)Flushing 1588230740/table: appending metadata at 1733456808693 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733456808693Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23841360: reopening flushed file at 1733456808709 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fd8522f: reopening flushed file at 1733456808718 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7be16b6b: reopening flushed file at 1733456808731 (+13 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1733456808743 (+12 ms)Writing region close event to WAL at 1733456808744 (+1 ms)Running coprocessor post-close hooks at 1733456808750 (+6 ms)Closed at 1733456808750 2024-12-06T03:46:48,751 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T03:46:48,805 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(976): stopping server 6f1b912b0816,33817,1733456713370; all regions closed. 
2024-12-06T03:46:48,808 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,808 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,809 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,809 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,809 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741834_1010 (size=3066) 2024-12-06T03:46:48,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741834_1010 (size=3066) 2024-12-06T03:46:48,815 DEBUG [RS:0;6f1b912b0816:33817 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs 2024-12-06T03:46:48,815 INFO [RS:0;6f1b912b0816:33817 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C33817%2C1733456713370.meta:.meta(num 1733456715430) 2024-12-06T03:46:48,818 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,818 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,818 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,818 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,818 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:48,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741847_1023 (size=12695) 2024-12-06T03:46:48,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741847_1023 (size=12695) 2024-12-06T03:46:48,825 DEBUG [RS:0;6f1b912b0816:33817 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/oldWALs 2024-12-06T03:46:48,825 INFO [RS:0;6f1b912b0816:33817 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C33817%2C1733456713370:(num 1733456788487) 2024-12-06T03:46:48,825 DEBUG [RS:0;6f1b912b0816:33817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:48,825 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:46:48,825 INFO [RS:0;6f1b912b0816:33817 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:46:48,825 INFO [RS:0;6f1b912b0816:33817 {}] hbase.ChoreService(370): Chore service for: regionserver/6f1b912b0816:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T03:46:48,825 INFO [RS:0;6f1b912b0816:33817 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:46:48,825 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T03:46:48,826 INFO [RS:0;6f1b912b0816:33817 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33817 2024-12-06T03:46:48,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:46:48,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6f1b912b0816,33817,1733456713370 2024-12-06T03:46:48,847 INFO [RS:0;6f1b912b0816:33817 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:46:48,848 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6f1b912b0816,33817,1733456713370] 2024-12-06T03:46:48,864 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6f1b912b0816,33817,1733456713370 already deleted, retry=false 2024-12-06T03:46:48,864 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6f1b912b0816,33817,1733456713370 expired; onlineServers=0 2024-12-06T03:46:48,864 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6f1b912b0816,40143,1733456712663' ***** 2024-12-06T03:46:48,864 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T03:46:48,865 INFO [M:0;6f1b912b0816:40143 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:46:48,865 INFO [M:0;6f1b912b0816:40143 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:46:48,865 DEBUG [M:0;6f1b912b0816:40143 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T03:46:48,865 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T03:46:48,865 DEBUG [M:0;6f1b912b0816:40143 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T03:46:48,865 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456714635 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456714635,5,FailOnTimeoutGroup] 2024-12-06T03:46:48,865 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456714621 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456714621,5,FailOnTimeoutGroup] 2024-12-06T03:46:48,865 INFO [M:0;6f1b912b0816:40143 {}] hbase.ChoreService(370): Chore service for: master/6f1b912b0816:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T03:46:48,865 INFO [M:0;6f1b912b0816:40143 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:46:48,865 DEBUG [M:0;6f1b912b0816:40143 {}] master.HMaster(1795): Stopping service threads 2024-12-06T03:46:48,866 INFO [M:0;6f1b912b0816:40143 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T03:46:48,866 INFO [M:0;6f1b912b0816:40143 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:46:48,866 INFO [M:0;6f1b912b0816:40143 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T03:46:48,866 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T03:46:48,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T03:46:48,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:48,873 DEBUG [M:0;6f1b912b0816:40143 {}] zookeeper.ZKUtil(347): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T03:46:48,873 WARN [M:0;6f1b912b0816:40143 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T03:46:48,874 INFO [M:0;6f1b912b0816:40143 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/.lastflushedseqids 2024-12-06T03:46:48,890 INFO [regionserver/6f1b912b0816:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:46:48,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741854_1030 (size=130) 2024-12-06T03:46:48,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741854_1030 (size=130) 2024-12-06T03:46:48,897 INFO [M:0;6f1b912b0816:40143 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T03:46:48,898 INFO [M:0;6f1b912b0816:40143 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, 
isAbort=false 2024-12-06T03:46:48,898 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:46:48,898 INFO [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:48,898 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:48,898 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:46:48,898 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:48,898 INFO [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-12-06T03:46:48,923 DEBUG [M:0;6f1b912b0816:40143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/18a44202ee894bd8abcad868b827eb48 is 82, key is hbase:meta,,1/info:regioninfo/1733456715490/Put/seqid=0 2024-12-06T03:46:48,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741855_1031 (size=5672) 2024-12-06T03:46:48,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741855_1031 (size=5672) 2024-12-06T03:46:48,930 INFO [M:0;6f1b912b0816:40143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/18a44202ee894bd8abcad868b827eb48 2024-12-06T03:46:48,954 DEBUG [M:0;6f1b912b0816:40143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/903b9d7bcb924f3fbdb4f1f00b593a0c is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733456716766/Put/seqid=0 2024-12-06T03:46:48,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:46:48,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x101aa07e3bf0001, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:46:48,957 INFO [RS:0;6f1b912b0816:33817 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:46:48,957 INFO [RS:0;6f1b912b0816:33817 {}] regionserver.HRegionServer(1031): Exiting; stopping=6f1b912b0816,33817,1733456713370; zookeeper connection closed. 
2024-12-06T03:46:48,957 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@75a1d1e4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@75a1d1e4 2024-12-06T03:46:48,958 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T03:46:48,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741856_1032 (size=6246) 2024-12-06T03:46:48,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741856_1032 (size=6246) 2024-12-06T03:46:48,960 INFO [M:0;6f1b912b0816:40143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/903b9d7bcb924f3fbdb4f1f00b593a0c 2024-12-06T03:46:48,967 INFO [M:0;6f1b912b0816:40143 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 903b9d7bcb924f3fbdb4f1f00b593a0c 2024-12-06T03:46:48,985 DEBUG [M:0;6f1b912b0816:40143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0e06c736a0094d648659d92b7c5a213f is 69, key is 6f1b912b0816,33817,1733456713370/rs:state/1733456714778/Put/seqid=0 2024-12-06T03:46:48,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741857_1033 (size=5156) 2024-12-06T03:46:48,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741857_1033 (size=5156) 2024-12-06T03:46:48,995 INFO [M:0;6f1b912b0816:40143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0e06c736a0094d648659d92b7c5a213f 2024-12-06T03:46:49,023 DEBUG [M:0;6f1b912b0816:40143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/183d8db79a8447b781a80775ad09086a is 52, key is load_balancer_on/state:d/1733456715844/Put/seqid=0 2024-12-06T03:46:49,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741858_1034 (size=5056) 2024-12-06T03:46:49,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741858_1034 (size=5056) 2024-12-06T03:46:49,031 INFO [M:0;6f1b912b0816:40143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/183d8db79a8447b781a80775ad09086a 2024-12-06T03:46:49,038 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/18a44202ee894bd8abcad868b827eb48 as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/18a44202ee894bd8abcad868b827eb48 2024-12-06T03:46:49,044 INFO [M:0;6f1b912b0816:40143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/18a44202ee894bd8abcad868b827eb48, entries=8, sequenceid=59, filesize=5.5 K 2024-12-06T03:46:49,047 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/903b9d7bcb924f3fbdb4f1f00b593a0c as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/903b9d7bcb924f3fbdb4f1f00b593a0c 2024-12-06T03:46:49,054 INFO [M:0;6f1b912b0816:40143 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 903b9d7bcb924f3fbdb4f1f00b593a0c 2024-12-06T03:46:49,054 INFO [M:0;6f1b912b0816:40143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/903b9d7bcb924f3fbdb4f1f00b593a0c, entries=6, sequenceid=59, filesize=6.1 K 2024-12-06T03:46:49,055 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0e06c736a0094d648659d92b7c5a213f as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0e06c736a0094d648659d92b7c5a213f 2024-12-06T03:46:49,062 INFO [M:0;6f1b912b0816:40143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0e06c736a0094d648659d92b7c5a213f, entries=1, sequenceid=59, filesize=5.0 K 2024-12-06T03:46:49,064 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/183d8db79a8447b781a80775ad09086a as hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/183d8db79a8447b781a80775ad09086a 2024-12-06T03:46:49,072 INFO [M:0;6f1b912b0816:40143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/183d8db79a8447b781a80775ad09086a, entries=1, sequenceid=59, filesize=4.9 K 2024-12-06T03:46:49,074 INFO [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, 
sequenceid=59, compaction requested=false 2024-12-06T03:46:49,075 INFO [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:49,076 DEBUG [M:0;6f1b912b0816:40143 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456808898Disabling compacts and flushes for region at 1733456808898Disabling writes for close at 1733456808898Obtaining lock to block concurrent updates at 1733456808898Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733456808898Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1733456808899 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733456808900 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733456808900Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733456808922 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733456808922Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733456808937 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733456808953 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733456808953Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733456808968 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733456808984 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733456808984Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733456809001 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733456809022 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733456809022Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77bad32e: reopening flushed file at 1733456809037 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cdfb508: reopening flushed file at 1733456809045 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a9134d3: reopening flushed file at 1733456809054 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51c1d87f: reopening flushed file at 1733456809063 (+9 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=59, compaction requested=false at 1733456809074 (+11 ms)Writing region close event to WAL at 1733456809075 (+1 ms)Closed at 1733456809075 2024-12-06T03:46:49,077 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:49,077 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:49,077 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:49,077 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:49,077 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:49,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741830_1006 (size=27961) 2024-12-06T03:46:49,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33917 is added to blk_1073741830_1006 (size=27961) 2024-12-06T03:46:49,080 
INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:46:49,081 INFO [M:0;6f1b912b0816:40143 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-06T03:46:49,081 INFO [M:0;6f1b912b0816:40143 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40143 2024-12-06T03:46:49,081 INFO [M:0;6f1b912b0816:40143 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:46:49,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:46:49,214 INFO [M:0;6f1b912b0816:40143 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:46:49,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40143-0x101aa07e3bf0000, quorum=127.0.0.1:56906, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:46:49,223 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:46:49,225 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:46:49,225 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:46:49,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:46:49,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.log.dir/,STOPPED} 2024-12-06T03:46:49,229 WARN [BP-508770624-172.17.0.2-1733456709070 heartbeating to localhost/127.0.0.1:37783 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:46:49,229 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:46:49,229 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:46:49,229 WARN [BP-508770624-172.17.0.2-1733456709070 heartbeating to localhost/127.0.0.1:37783 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-508770624-172.17.0.2-1733456709070 (Datanode Uuid dc95875a-842a-469d-8ec7-28a7d8ccfcb8) service to localhost/127.0.0.1:37783 2024-12-06T03:46:49,230 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/data/data3/current/BP-508770624-172.17.0.2-1733456709070 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:46:49,230 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/data/data4/current/BP-508770624-172.17.0.2-1733456709070 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:46:49,231 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:46:49,232 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:46:49,233 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:46:49,233 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:46:49,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:46:49,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.log.dir/,STOPPED} 2024-12-06T03:46:49,235 WARN [BP-508770624-172.17.0.2-1733456709070 heartbeating to localhost/127.0.0.1:37783 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:46:49,235 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:46:49,235 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:46:49,235 WARN [BP-508770624-172.17.0.2-1733456709070 heartbeating to localhost/127.0.0.1:37783 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-508770624-172.17.0.2-1733456709070 (Datanode Uuid 13bb1c0d-c025-4a1d-b490-9d93180ea5c0) service to localhost/127.0.0.1:37783 2024-12-06T03:46:49,236 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/data/data1/current/BP-508770624-172.17.0.2-1733456709070 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:46:49,236 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/cluster_bc6c1dfa-016c-d22b-9f9b-bc69b9fd48db/data/data2/current/BP-508770624-172.17.0.2-1733456709070 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:46:49,237 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:46:49,246 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:46:49,247 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:46:49,247 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:46:49,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:46:49,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.log.dir/,STOPPED} 2024-12-06T03:46:49,256 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T03:46:49,289 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T03:46:49,299 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37783 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37783 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37783 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/6f1b912b0816:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:37783 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37783 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/6f1b912b0816:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/6f1b912b0816:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37783 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37783 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37783 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@2977aefe java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=191 (was 399), ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=8764 (was 8833) 2024-12-06T03:46:49,305 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=191, ProcessCount=11, AvailableMemoryMB=8763 2024-12-06T03:46:49,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T03:46:49,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.log.dir so I do NOT create it in target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0 2024-12-06T03:46:49,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9ac1238c-088c-88fd-62dc-6598d2762f50/hadoop.tmp.dir so I do NOT create it in target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0 2024-12-06T03:46:49,306 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e, deleteOnExit=true 2024-12-06T03:46:49,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-06T03:46:49,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/test.cache.data in system properties and HBase conf 2024-12-06T03:46:49,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T03:46:49,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.log.dir in system properties and HBase conf 2024-12-06T03:46:49,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T03:46:49,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T03:46:49,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T03:46:49,307 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T03:46:49,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:46:49,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:46:49,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/nfs.dump.dir in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/java.io.tmpdir in system properties and HBase conf 2024-12-06T03:46:49,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:46:49,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T03:46:49,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T03:46:49,323 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:46:49,638 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:46:49,644 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:46:49,645 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:46:49,646 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:46:49,646 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:46:49,646 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:46:49,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43fab4bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:46:49,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ea6e47a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:46:49,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2281152e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/java.io.tmpdir/jetty-localhost-43959-hadoop-hdfs-3_4_1-tests_jar-_-any-13900028755535516254/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:46:49,756 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3485277{HTTP/1.1, (http/1.1)}{localhost:43959} 2024-12-06T03:46:49,756 INFO [Time-limited test {}] server.Server(415): Started @102621ms 2024-12-06T03:46:49,768 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:46:50,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:46:50,013 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:46:50,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:46:50,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:46:50,015 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:46:50,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@345536c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:46:50,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33e82987{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:46:50,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e335929{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/java.io.tmpdir/jetty-localhost-45589-hadoop-hdfs-3_4_1-tests_jar-_-any-11669766580705349313/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:46:50,118 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@726508a1{HTTP/1.1, (http/1.1)}{localhost:45589} 2024-12-06T03:46:50,118 INFO [Time-limited test {}] server.Server(415): Started @102983ms 2024-12-06T03:46:50,120 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:46:50,163 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:46:50,166 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:46:50,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:46:50,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:46:50,167 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:46:50,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3168153a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:46:50,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fcb1c4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:46:50,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11ff445e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/java.io.tmpdir/jetty-localhost-41957-hadoop-hdfs-3_4_1-tests_jar-_-any-5390039776371426357/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:46:50,274 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e63263c{HTTP/1.1, (http/1.1)}{localhost:41957} 2024-12-06T03:46:50,275 INFO [Time-limited test {}] server.Server(415): Started @103140ms 2024-12-06T03:46:50,276 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:46:50,972 WARN [Thread-450 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/data/data1/current/BP-1453088084-172.17.0.2-1733456809335/current, will proceed with Du for space computation calculation, 2024-12-06T03:46:50,977 WARN [Thread-451 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/data/data2/current/BP-1453088084-172.17.0.2-1733456809335/current, will proceed with Du for space computation calculation, 2024-12-06T03:46:50,995 WARN [Thread-414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:46:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdff791c1d7f64ddb with lease ID 0x148e2739af5b98c4: Processing first storage report for DS-13123cb2-9b9e-4add-a3b1-feb08251f198 from datanode DatanodeRegistration(127.0.0.1:36535, datanodeUuid=ba323b6f-5b7f-4c44-ac2b-6e864510ac6e, infoPort=39173, infoSecurePort=0, ipcPort=38811, storageInfo=lv=-57;cid=testClusterID;nsid=1804601254;c=1733456809335) 2024-12-06T03:46:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdff791c1d7f64ddb with lease ID 0x148e2739af5b98c4: from storage DS-13123cb2-9b9e-4add-a3b1-feb08251f198 node DatanodeRegistration(127.0.0.1:36535, datanodeUuid=ba323b6f-5b7f-4c44-ac2b-6e864510ac6e, infoPort=39173, infoSecurePort=0, ipcPort=38811, storageInfo=lv=-57;cid=testClusterID;nsid=1804601254;c=1733456809335), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T03:46:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdff791c1d7f64ddb with lease ID 0x148e2739af5b98c4: Processing first storage report for DS-7adc82d7-f9e0-496f-afd3-49951734c4ef from datanode DatanodeRegistration(127.0.0.1:36535, datanodeUuid=ba323b6f-5b7f-4c44-ac2b-6e864510ac6e, infoPort=39173, infoSecurePort=0, ipcPort=38811, storageInfo=lv=-57;cid=testClusterID;nsid=1804601254;c=1733456809335) 2024-12-06T03:46:50,999 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdff791c1d7f64ddb with lease ID 0x148e2739af5b98c4: from storage DS-7adc82d7-f9e0-496f-afd3-49951734c4ef node DatanodeRegistration(127.0.0.1:36535, datanodeUuid=ba323b6f-5b7f-4c44-ac2b-6e864510ac6e, infoPort=39173, infoSecurePort=0, ipcPort=38811, storageInfo=lv=-57;cid=testClusterID;nsid=1804601254;c=1733456809335), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:46:51,092 WARN [Thread-461 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/data/data3/current/BP-1453088084-172.17.0.2-1733456809335/current, will proceed with Du for space computation calculation, 2024-12-06T03:46:51,092 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/data/data4/current/BP-1453088084-172.17.0.2-1733456809335/current, will proceed with Du for space computation calculation, 2024-12-06T03:46:51,111 WARN [Thread-437 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:46:51,114 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52a491a8542fdbe7 with lease ID 0x148e2739af5b98c5: Processing first storage report for DS-bc474757-43ad-474f-81dc-861b0d3eeedb from datanode DatanodeRegistration(127.0.0.1:40855, datanodeUuid=63298453-577e-46b7-a99c-9bcd8d55da01, infoPort=36015, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=1804601254;c=1733456809335) 2024-12-06T03:46:51,114 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52a491a8542fdbe7 with lease ID 0x148e2739af5b98c5: from storage DS-bc474757-43ad-474f-81dc-861b0d3eeedb node DatanodeRegistration(127.0.0.1:40855, datanodeUuid=63298453-577e-46b7-a99c-9bcd8d55da01, infoPort=36015, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=1804601254;c=1733456809335), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:46:51,114 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52a491a8542fdbe7 with lease ID 0x148e2739af5b98c5: Processing first storage report for DS-48720538-255f-4f2c-9259-fdbc0006608c from datanode DatanodeRegistration(127.0.0.1:40855, datanodeUuid=63298453-577e-46b7-a99c-9bcd8d55da01, infoPort=36015, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=1804601254;c=1733456809335) 2024-12-06T03:46:51,114 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52a491a8542fdbe7 with lease ID 0x148e2739af5b98c5: from storage DS-48720538-255f-4f2c-9259-fdbc0006608c node DatanodeRegistration(127.0.0.1:40855, datanodeUuid=63298453-577e-46b7-a99c-9bcd8d55da01, infoPort=36015, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=1804601254;c=1733456809335), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:46:51,213 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0 2024-12-06T03:46:51,217 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/zookeeper_0, clientPort=58256, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T03:46:51,218 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58256 2024-12-06T03:46:51,218 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:51,220 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:51,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:46:51,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:46:51,232 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88 with version=8 2024-12-06T03:46:51,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase-staging 2024-12-06T03:46:51,235 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:46:51,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:51,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:51,235 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:46:51,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:51,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:46:51,235 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T03:46:51,236 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:46:51,236 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42255 2024-12-06T03:46:51,238 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42255 connecting to ZooKeeper ensemble=127.0.0.1:58256 2024-12-06T03:46:51,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:422550x0, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:46:51,302 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42255-0x101aa0967950000 connected 2024-12-06T03:46:51,376 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:51,378 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:51,381 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:46:51,381 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88, hbase.cluster.distributed=false 2024-12-06T03:46:51,383 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:46:51,390 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42255 2024-12-06T03:46:51,390 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42255 2024-12-06T03:46:51,391 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42255 2024-12-06T03:46:51,391 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42255 2024-12-06T03:46:51,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42255 2024-12-06T03:46:51,412 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:46:51,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:51,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:51,413 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:46:51,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:51,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:46:51,413 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T03:46:51,413 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:46:51,414 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42815 2024-12-06T03:46:51,416 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42815 connecting to ZooKeeper ensemble=127.0.0.1:58256 2024-12-06T03:46:51,417 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:51,420 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:51,443 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428150x0, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:46:51,444 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428150x0, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:46:51,444 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T03:46:51,445 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42815-0x101aa0967950001 connected 2024-12-06T03:46:51,445 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T03:46:51,446 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T03:46:51,447 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:46:51,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42815 2024-12-06T03:46:51,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42815 2024-12-06T03:46:51,458 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42815 2024-12-06T03:46:51,461 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42815 2024-12-06T03:46:51,461 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42815 2024-12-06T03:46:51,479 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6f1b912b0816:42255 2024-12-06T03:46:51,479 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6f1b912b0816,42255,1733456811235 2024-12-06T03:46:51,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:46:51,490 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:46:51,491 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/6f1b912b0816,42255,1733456811235 2024-12-06T03:46:51,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,501 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T03:46:51,501 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,502 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T03:46:51,502 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6f1b912b0816,42255,1733456811235 from backup master directory 2024-12-06T03:46:51,509 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:46:51,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6f1b912b0816,42255,1733456811235 2024-12-06T03:46:51,509 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
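Editorial note: the ZKWatcher lines around here report one-shot ZooKeeper watches firing NodeCreated/NodeDeleted/NodeChildrenChanged events on znodes under /hbase, with the watch then being set again. Below is a minimal illustrative sketch of that watch/re-arm cycle using the stock org.apache.zookeeper client rather than HBase's internal ZKWatcher/ZKUtil; the quorum string 127.0.0.1:58256 is taken from the log, while the class name, znode choices and timeouts are placeholders.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/**
 * Illustrative only: registers one-shot watches on /hbase znodes with the plain
 * ZooKeeper client, mirroring the NodeCreated/NodeDeleted/NodeChildrenChanged
 * notifications the ZKWatcher log lines above report. Not HBase's own ZKUtil.
 */
public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum taken from the log; the session timeout is an arbitrary choice here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58256", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    Watcher rearming = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getPath() == null) {
          return; // connection-state events carry no path
        }
        // Each notification carries only the event type and path, as in the log.
        System.out.println("Event " + event.getType() + " on " + event.getPath());
        try {
          // ZooKeeper watches are one-shot: re-register after every event,
          // which is what the repeated "Set watcher on znode ..." lines reflect.
          zk.exists(event.getPath(), this);
        } catch (KeeperException | InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    };

    // exists() sets a watch even when the znode does not exist yet
    // ("Set watcher on znode that does not yet exist" in the log).
    zk.exists("/hbase/master", rearming);
    zk.exists("/hbase/running", rearming);
    Thread.sleep(60_000); // keep the session alive long enough to observe events
    zk.close();
  }
}
```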
2024-12-06T03:46:51,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:46:51,509 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6f1b912b0816,42255,1733456811235 2024-12-06T03:46:51,515 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/hbase.id] with ID: 5f732aa0-ed66-4af2-8aab-d320112a2213 2024-12-06T03:46:51,515 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/.tmp/hbase.id 2024-12-06T03:46:51,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:46:51,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:46:51,525 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/.tmp/hbase.id]:[hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/hbase.id] 2024-12-06T03:46:51,539 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:51,539 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T03:46:51,541 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
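Editorial note: the FSUtils cluster-ID lines above show a common publish pattern, write the file under a .tmp location first, then rename it into place so readers never observe a half-written hbase.id. The sketch below shows that pattern with the public Hadoop FileSystem API under assumed paths; it is not the FSUtils implementation itself, and the ID value is only echoed from the log for illustration.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Illustrative sketch of the write-to-.tmp-then-rename pattern the cluster ID
 * log lines show. Paths are placeholders; this is not the FSUtils code itself.
 */
public class PublishFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.defaultFS would point at the mini cluster, e.g. hdfs://localhost:46535 in the log.
    FileSystem fs = FileSystem.get(conf);

    Path target = new Path("/user/jenkins/test-data/demo/hbase.id");
    Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());

    // 1. Write the full contents to the temporary location first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("5f732aa0-ed66-4af2-8aab-d320112a2213".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Move it into place in one step; readers only ever see a complete file.
    if (!fs.rename(tmp, target)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + target);
    }
    fs.close();
  }
}
```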
2024-12-06T03:46:51,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,551 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:46:51,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:46:51,559 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:46:51,560 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T03:46:51,560 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:46:51,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:46:51,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:46:51,574 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store 2024-12-06T03:46:51,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:46:51,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:46:51,588 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:51,588 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:46:51,589 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:51,589 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:51,589 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:46:51,589 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:51,589 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
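Editorial note: the long descriptor dump above is the stringified form of an HBase TableDescriptor with families info/proc/rs/state. As a rough guide to reading it, the sketch below assembles a descriptor with similar settings for two of those families through the public client builders. The table name is hypothetical; the real master:store region is created internally by the master, not through client-side table creation.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch of building a descriptor with settings similar to the 'info' and
 * 'proc' families dumped above, using the public client API. Table name is
 * a placeholder ("demo"), not the internal master:store table.
 */
public class DescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8KB
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .build();

    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)                              // BLOCKSIZE => 64KB
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                // hypothetical name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
```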
2024-12-06T03:46:51,589 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456811588Disabling compacts and flushes for region at 1733456811588Disabling writes for close at 1733456811589 (+1 ms)Writing region close event to WAL at 1733456811589Closed at 1733456811589 2024-12-06T03:46:51,590 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/.initializing 2024-12-06T03:46:51,590 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/WALs/6f1b912b0816,42255,1733456811235 2024-12-06T03:46:51,593 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C42255%2C1733456811235, suffix=, logDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/WALs/6f1b912b0816,42255,1733456811235, archiveDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/oldWALs, maxLogs=10 2024-12-06T03:46:51,594 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C42255%2C1733456811235.1733456811594 2024-12-06T03:46:51,604 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/WALs/6f1b912b0816,42255,1733456811235/6f1b912b0816%2C42255%2C1733456811235.1733456811594 2024-12-06T03:46:51,605 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36015:36015),(127.0.0.1/127.0.0.1:39173:39173)] 2024-12-06T03:46:51,606 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:46:51,606 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:51,606 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,607 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T03:46:51,610 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:51,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T03:46:51,613 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:46:51,614 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T03:46:51,616 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:46:51,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T03:46:51,618 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:46:51,619 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,620 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,621 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,622 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,622 DEBUG [master/6f1b912b0816:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,623 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T03:46:51,625 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:51,628 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:46:51,628 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751641, jitterRate=-0.04423925280570984}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T03:46:51,629 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733456811607Initializing all the Stores at 1733456811608 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456811608Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456811608Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456811608Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456811608Cleaning up temporary data from old regions at 1733456811622 (+14 ms)Region opened successfully at 1733456811629 (+7 ms) 2024-12-06T03:46:51,630 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T03:46:51,634 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605d0ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:46:51,635 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T03:46:51,635 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T03:46:51,636 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T03:46:51,636 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T03:46:51,636 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T03:46:51,637 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-06T03:46:51,637 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T03:46:51,640 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T03:46:51,641 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T03:46:51,659 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T03:46:51,660 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T03:46:51,661 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T03:46:51,667 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T03:46:51,668 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T03:46:51,669 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T03:46:51,676 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T03:46:51,677 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T03:46:51,684 DEBUG 
[master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T03:46:51,687 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T03:46:51,698 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T03:46:51,709 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:46:51,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:46:51,709 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,710 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6f1b912b0816,42255,1733456811235, sessionid=0x101aa0967950000, setting cluster-up flag (Was=false) 2024-12-06T03:46:51,726 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,751 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T03:46:51,753 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,42255,1733456811235 2024-12-06T03:46:51,768 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:51,793 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T03:46:51,794 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,42255,1733456811235 2024-12-06T03:46:51,796 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T03:46:51,799 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T03:46:51,799 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T03:46:51,799 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T03:46:51,800 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6f1b912b0816,42255,1733456811235 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T03:46:51,802 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:46:51,802 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:46:51,802 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:46:51,803 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:46:51,803 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6f1b912b0816:0, corePoolSize=10, maxPoolSize=10 2024-12-06T03:46:51,803 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,803 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:46:51,803 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, 
maxPoolSize=1 2024-12-06T03:46:51,804 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733456841804 2024-12-06T03:46:51,804 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T03:46:51,804 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T03:46:51,804 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T03:46:51,805 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T03:46:51,805 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T03:46:51,805 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T03:46:51,805 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:46:51,806 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T03:46:51,806 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:51,806 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T03:46:51,807 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T03:46:51,807 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T03:46:51,807 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T03:46:51,807 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T03:46:51,807 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,807 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456811807,5,FailOnTimeoutGroup] 2024-12-06T03:46:51,807 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T03:46:51,808 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456811807,5,FailOnTimeoutGroup] 2024-12-06T03:46:51,808 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,808 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T03:46:51,808 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,808 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:46:51,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:46:51,816 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T03:46:51,817 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88 2024-12-06T03:46:51,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:46:51,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:46:51,828 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:51,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:46:51,831 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:46:51,831 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:51,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:46:51,834 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:46:51,834 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:51,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:46:51,836 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:46:51,836 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:51,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:46:51,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:46:51,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:51,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:51,840 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:46:51,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740 2024-12-06T03:46:51,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740 2024-12-06T03:46:51,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:46:51,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:46:51,843 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:46:51,845 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:46:51,847 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:46:51,848 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762675, jitterRate=-0.03020872175693512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:46:51,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733456811828Initializing all the Stores at 1733456811829 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456811829Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456811829Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456811829Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456811829Cleaning up temporary data from old regions at 1733456811843 (+14 ms)Region opened successfully at 1733456811849 (+6 ms) 2024-12-06T03:46:51,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:46:51,849 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:46:51,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:46:51,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:46:51,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:46:51,851 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:46:51,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456811849Disabling compacts and flushes for region at 
1733456811849Disabling writes for close at 1733456811849Writing region close event to WAL at 1733456811851 (+2 ms)Closed at 1733456811851 2024-12-06T03:46:51,852 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:46:51,852 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T03:46:51,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T03:46:51,854 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:46:51,856 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T03:46:51,864 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(746): ClusterId : 5f732aa0-ed66-4af2-8aab-d320112a2213 2024-12-06T03:46:51,864 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T03:46:51,900 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T03:46:51,900 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T03:46:51,910 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T03:46:51,911 DEBUG [RS:0;6f1b912b0816:42815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@383d1313, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:46:51,924 DEBUG [RS:0;6f1b912b0816:42815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6f1b912b0816:42815 2024-12-06T03:46:51,924 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T03:46:51,924 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T03:46:51,924 DEBUG [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-06T03:46:51,925 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,42255,1733456811235 with port=42815, startcode=1733456811412 2024-12-06T03:46:51,925 DEBUG [RS:0;6f1b912b0816:42815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T03:46:51,931 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33039, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T03:46:51,932 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42255 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6f1b912b0816,42815,1733456811412 2024-12-06T03:46:51,932 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42255 {}] master.ServerManager(517): Registering regionserver=6f1b912b0816,42815,1733456811412 2024-12-06T03:46:51,934 DEBUG [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88 2024-12-06T03:46:51,935 DEBUG [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46535 2024-12-06T03:46:51,935 DEBUG [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T03:46:51,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:46:51,943 DEBUG [RS:0;6f1b912b0816:42815 {}] zookeeper.ZKUtil(111): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6f1b912b0816,42815,1733456811412 2024-12-06T03:46:51,943 WARN [RS:0;6f1b912b0816:42815 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:46:51,943 INFO [RS:0;6f1b912b0816:42815 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:46:51,944 DEBUG [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/WALs/6f1b912b0816,42815,1733456811412 2024-12-06T03:46:51,947 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6f1b912b0816,42815,1733456811412] 2024-12-06T03:46:51,950 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T03:46:51,953 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T03:46:51,954 INFO [RS:0;6f1b912b0816:42815 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:46:51,954 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:51,954 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T03:46:51,955 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T03:46:51,955 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,955 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,955 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:46:51,956 DEBUG [RS:0;6f1b912b0816:42815 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:46:51,957 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:51,957 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,957 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,957 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,957 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,957 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,42815,1733456811412-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:46:51,971 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T03:46:51,971 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,42815,1733456811412-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,972 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,972 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.Replication(171): 6f1b912b0816,42815,1733456811412 started 2024-12-06T03:46:51,988 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:51,988 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1482): Serving as 6f1b912b0816,42815,1733456811412, RpcServer on 6f1b912b0816/172.17.0.2:42815, sessionid=0x101aa0967950001 2024-12-06T03:46:51,989 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T03:46:51,989 DEBUG [RS:0;6f1b912b0816:42815 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6f1b912b0816,42815,1733456811412 2024-12-06T03:46:51,989 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,42815,1733456811412' 2024-12-06T03:46:51,989 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T03:46:51,989 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T03:46:51,990 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T03:46:51,990 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T03:46:51,990 DEBUG [RS:0;6f1b912b0816:42815 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6f1b912b0816,42815,1733456811412 2024-12-06T03:46:51,990 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,42815,1733456811412' 2024-12-06T03:46:51,990 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T03:46:51,991 DEBUG 
[RS:0;6f1b912b0816:42815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T03:46:51,991 DEBUG [RS:0;6f1b912b0816:42815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T03:46:51,991 INFO [RS:0;6f1b912b0816:42815 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T03:46:51,991 INFO [RS:0;6f1b912b0816:42815 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T03:46:52,006 WARN [6f1b912b0816:42255 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-06T03:46:52,095 INFO [RS:0;6f1b912b0816:42815 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C42815%2C1733456811412, suffix=, logDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/WALs/6f1b912b0816,42815,1733456811412, archiveDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/oldWALs, maxLogs=32 2024-12-06T03:46:52,097 INFO [RS:0;6f1b912b0816:42815 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C42815%2C1733456811412.1733456812097 2024-12-06T03:46:52,106 INFO [RS:0;6f1b912b0816:42815 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/WALs/6f1b912b0816,42815,1733456811412/6f1b912b0816%2C42815%2C1733456811412.1733456812097 2024-12-06T03:46:52,107 DEBUG [RS:0;6f1b912b0816:42815 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39173:39173),(127.0.0.1/127.0.0.1:36015:36015)] 2024-12-06T03:46:52,257 DEBUG [6f1b912b0816:42255 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T03:46:52,257 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6f1b912b0816,42815,1733456811412 2024-12-06T03:46:52,260 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,42815,1733456811412, state=OPENING 2024-12-06T03:46:52,293 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T03:46:52,301 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:52,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:52,302 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:46:52,302 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:46:52,302 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:46:52,302 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,42815,1733456811412}] 2024-12-06T03:46:52,459 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T03:46:52,461 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39175, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T03:46:52,465 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T03:46:52,465 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:46:52,468 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C42815%2C1733456811412.meta, suffix=.meta, logDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/WALs/6f1b912b0816,42815,1733456811412, archiveDir=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/oldWALs, maxLogs=32 2024-12-06T03:46:52,470 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C42815%2C1733456811412.meta.1733456812470.meta 2024-12-06T03:46:52,477 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/WALs/6f1b912b0816,42815,1733456811412/6f1b912b0816%2C42815%2C1733456811412.meta.1733456812470.meta 2024-12-06T03:46:52,479 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39173:39173),(127.0.0.1/127.0.0.1:36015:36015)] 2024-12-06T03:46:52,480 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:46:52,480 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T03:46:52,481 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T03:46:52,481 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T03:46:52,481 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T03:46:52,481 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:52,481 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T03:46:52,481 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T03:46:52,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:46:52,484 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:46:52,484 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:52,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:52,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:46:52,486 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:46:52,486 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:52,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:52,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:46:52,488 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:46:52,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:52,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:52,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:46:52,490 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:46:52,490 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:52,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-06T03:46:52,490 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:46:52,491 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740 2024-12-06T03:46:52,493 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740 2024-12-06T03:46:52,494 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:46:52,494 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:46:52,495 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:46:52,497 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:46:52,498 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695801, jitterRate=-0.1152438372373581}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:46:52,498 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T03:46:52,499 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733456812481Writing region info on filesystem at 1733456812481Initializing all the Stores at 1733456812482 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456812482Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456812483 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456812483Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456812483Cleaning up temporary data from old regions at 1733456812494 (+11 ms)Running coprocessor post-open hooks at 1733456812498 (+4 ms)Region opened successfully at 1733456812499 (+1 ms) 2024-12-06T03:46:52,500 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733456812458 2024-12-06T03:46:52,503 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T03:46:52,503 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T03:46:52,504 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,42815,1733456811412 2024-12-06T03:46:52,505 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,42815,1733456811412, state=OPEN 2024-12-06T03:46:52,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:46:52,567 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6f1b912b0816,42815,1733456811412 2024-12-06T03:46:52,567 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:46:52,567 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:46:52,568 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:46:52,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T03:46:52,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,42815,1733456811412 in 265 msec 2024-12-06T03:46:52,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T03:46:52,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 721 msec 2024-12-06T03:46:52,578 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:46:52,578 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T03:46:52,580 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:46:52,580 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,42815,1733456811412, seqNum=-1] 2024-12-06T03:46:52,581 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:46:52,582 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:46:52,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 791 msec 2024-12-06T03:46:52,591 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733456812590, completionTime=-1 2024-12-06T03:46:52,591 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T03:46:52,591 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-06T03:46:52,593 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-06T03:46:52,593 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733456872593 2024-12-06T03:46:52,593 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733456932593 2024-12-06T03:46:52,593 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-06T03:46:52,594 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,42255,1733456811235-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:52,594 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,42255,1733456811235-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:52,594 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,42255,1733456811235-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:52,594 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6f1b912b0816:42255, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:52,594 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:52,594 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:52,596 DEBUG [master/6f1b912b0816:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T03:46:52,600 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.090sec 2024-12-06T03:46:52,600 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T03:46:52,600 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T03:46:52,600 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T03:46:52,600 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T03:46:52,600 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T03:46:52,600 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,42255,1733456811235-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:46:52,601 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,42255,1733456811235-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T03:46:52,604 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T03:46:52,604 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T03:46:52,604 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,42255,1733456811235-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:52,665 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62845f7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:46:52,665 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6f1b912b0816,42255,-1 for getting cluster id 2024-12-06T03:46:52,665 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T03:46:52,669 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5f732aa0-ed66-4af2-8aab-d320112a2213' 2024-12-06T03:46:52,669 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T03:46:52,670 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5f732aa0-ed66-4af2-8aab-d320112a2213" 2024-12-06T03:46:52,670 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22b8ea1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:46:52,671 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6f1b912b0816,42255,-1] 2024-12-06T03:46:52,671 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T03:46:52,672 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:52,674 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34544, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-06T03:46:52,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69f8a81d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:46:52,676 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:46:52,677 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,42815,1733456811412, seqNum=-1] 2024-12-06T03:46:52,678 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:46:52,679 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59072, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:46:52,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6f1b912b0816,42255,1733456811235 2024-12-06T03:46:52,682 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:52,686 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-06T03:46:52,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T03:46:52,686 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T03:46:52,687 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:46:52,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:52,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:52,687 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T03:46:52,687 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=860084951, stopped=false 2024-12-06T03:46:52,687 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-06T03:46:52,687 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6f1b912b0816,42255,1733456811235 2024-12-06T03:46:52,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:46:52,718 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:46:52,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:52,718 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:46:52,718 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:52,718 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-06T03:46:52,718 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:46:52,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:52,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:46:52,719 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:46:52,719 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6f1b912b0816,42815,1733456811412' ***** 2024-12-06T03:46:52,719 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T03:46:52,719 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T03:46:52,719 INFO [RS:0;6f1b912b0816:42815 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T03:46:52,719 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T03:46:52,719 INFO [RS:0;6f1b912b0816:42815 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T03:46:52,719 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(959): stopping server 6f1b912b0816,42815,1733456811412 2024-12-06T03:46:52,719 INFO [RS:0;6f1b912b0816:42815 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:46:52,719 INFO [RS:0;6f1b912b0816:42815 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6f1b912b0816:42815. 2024-12-06T03:46:52,719 DEBUG [RS:0;6f1b912b0816:42815 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:46:52,719 DEBUG [RS:0;6f1b912b0816:42815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:52,720 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-06T03:46:52,720 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T03:46:52,720 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T03:46:52,720 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T03:46:52,720 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-06T03:46:52,720 DEBUG [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-06T03:46:52,720 DEBUG [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-06T03:46:52,720 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:46:52,720 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:46:52,720 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:46:52,720 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:46:52,720 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:46:52,721 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-06T03:46:52,745 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740/.tmp/ns/7ee0f4c2bef5457ba4af7286fc025897 is 43, key is default/ns:d/1733456812583/Put/seqid=0 2024-12-06T03:46:52,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741835_1011 (size=5153) 2024-12-06T03:46:52,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741835_1011 (size=5153) 2024-12-06T03:46:52,751 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740/.tmp/ns/7ee0f4c2bef5457ba4af7286fc025897 2024-12-06T03:46:52,760 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740/.tmp/ns/7ee0f4c2bef5457ba4af7286fc025897 as hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740/ns/7ee0f4c2bef5457ba4af7286fc025897 2024-12-06T03:46:52,768 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740/ns/7ee0f4c2bef5457ba4af7286fc025897, entries=2, sequenceid=6, filesize=5.0 K 2024-12-06T03:46:52,770 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false 2024-12-06T03:46:52,770 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T03:46:52,777 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T03:46:52,778 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:46:52,778 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:46:52,778 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456812720Running coprocessor pre-close hooks at 1733456812720Disabling compacts and flushes for region at 1733456812720Disabling writes for close at 1733456812720Obtaining lock to block concurrent updates at 1733456812721 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733456812721Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733456812721Flushing stores of hbase:meta,,1.1588230740 at 1733456812722 (+1 ms)Flushing 1588230740/ns: creating writer at 1733456812722Flushing 1588230740/ns: appending metadata at 1733456812744 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1733456812744Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e2e4c6a: reopening flushed file at 1733456812759 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false at 1733456812770 (+11 ms)Writing region close event to WAL at 1733456812772 (+2 ms)Running coprocessor post-close hooks at 1733456812778 (+6 ms)Closed at 1733456812778 2024-12-06T03:46:52,779 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T03:46:52,920 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(976): stopping server 6f1b912b0816,42815,1733456811412; all regions closed. 
2024-12-06T03:46:52,921 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,921 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,921 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741834_1010 (size=1152) 2024-12-06T03:46:52,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741834_1010 (size=1152) 2024-12-06T03:46:52,929 DEBUG [RS:0;6f1b912b0816:42815 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/oldWALs 2024-12-06T03:46:52,929 INFO [RS:0;6f1b912b0816:42815 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C42815%2C1733456811412.meta:.meta(num 1733456812470) 2024-12-06T03:46:52,930 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,930 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,930 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,930 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,930 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:52,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741833_1009 (size=93) 2024-12-06T03:46:52,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741833_1009 (size=93) 2024-12-06T03:46:52,934 DEBUG [RS:0;6f1b912b0816:42815 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/oldWALs 2024-12-06T03:46:52,934 INFO [RS:0;6f1b912b0816:42815 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C42815%2C1733456811412:(num 1733456812097) 2024-12-06T03:46:52,934 DEBUG [RS:0;6f1b912b0816:42815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:52,934 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:46:52,934 INFO [RS:0;6f1b912b0816:42815 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:46:52,935 INFO [RS:0;6f1b912b0816:42815 {}] hbase.ChoreService(370): Chore service for: regionserver/6f1b912b0816:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T03:46:52,935 INFO [RS:0;6f1b912b0816:42815 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:46:52,935 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T03:46:52,935 INFO [RS:0;6f1b912b0816:42815 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42815 2024-12-06T03:46:52,957 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6f1b912b0816,42815,1733456811412 2024-12-06T03:46:52,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:46:52,957 INFO [RS:0;6f1b912b0816:42815 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:46:52,967 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6f1b912b0816,42815,1733456811412] 2024-12-06T03:46:52,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:46:52,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:46:52,976 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6f1b912b0816,42815,1733456811412 already deleted, retry=false 2024-12-06T03:46:52,976 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6f1b912b0816,42815,1733456811412 expired; onlineServers=0 2024-12-06T03:46:52,976 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6f1b912b0816,42255,1733456811235' ***** 2024-12-06T03:46:52,976 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T03:46:52,976 INFO [M:0;6f1b912b0816:42255 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:46:52,976 INFO [M:0;6f1b912b0816:42255 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:46:52,976 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-06T03:46:52,976 DEBUG [M:0;6f1b912b0816:42255 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T03:46:52,977 DEBUG [M:0;6f1b912b0816:42255 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T03:46:52,977 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T03:46:52,977 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456811807 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456811807,5,FailOnTimeoutGroup] 2024-12-06T03:46:52,977 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456811807 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456811807,5,FailOnTimeoutGroup] 2024-12-06T03:46:52,977 INFO [M:0;6f1b912b0816:42255 {}] hbase.ChoreService(370): Chore service for: master/6f1b912b0816:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T03:46:52,978 INFO [M:0;6f1b912b0816:42255 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:46:52,978 DEBUG [M:0;6f1b912b0816:42255 {}] master.HMaster(1795): Stopping service threads 2024-12-06T03:46:52,978 INFO [M:0;6f1b912b0816:42255 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T03:46:52,978 INFO [M:0;6f1b912b0816:42255 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:46:52,978 INFO [M:0;6f1b912b0816:42255 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T03:46:52,978 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T03:46:52,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T03:46:52,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:52,984 DEBUG [M:0;6f1b912b0816:42255 {}] zookeeper.ZKUtil(347): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T03:46:52,984 WARN [M:0;6f1b912b0816:42255 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T03:46:52,985 INFO [M:0;6f1b912b0816:42255 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/.lastflushedseqids 2024-12-06T03:46:52,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741836_1012 (size=99) 2024-12-06T03:46:52,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741836_1012 (size=99) 2024-12-06T03:46:52,991 INFO [M:0;6f1b912b0816:42255 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T03:46:52,992 INFO [M:0;6f1b912b0816:42255 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T03:46:52,992 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:46:52,992 INFO [M:0;6f1b912b0816:42255 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:52,992 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:52,992 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:46:52,992 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:52,992 INFO [M:0;6f1b912b0816:42255 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-06T03:46:53,009 DEBUG [M:0;6f1b912b0816:42255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/58d5e1d41bde47a28834d5b6c6984751 is 82, key is hbase:meta,,1/info:regioninfo/1733456812504/Put/seqid=0 2024-12-06T03:46:53,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741837_1013 (size=5672) 2024-12-06T03:46:53,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741837_1013 (size=5672) 2024-12-06T03:46:53,015 INFO [M:0;6f1b912b0816:42255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/58d5e1d41bde47a28834d5b6c6984751 2024-12-06T03:46:53,037 DEBUG [M:0;6f1b912b0816:42255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93c6d1f016c549e48bdbef4271fa59ca is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733456812589/Put/seqid=0 2024-12-06T03:46:53,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741838_1014 (size=5275) 2024-12-06T03:46:53,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741838_1014 (size=5275) 2024-12-06T03:46:53,068 INFO [RS:0;6f1b912b0816:42815 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:46:53,068 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:46:53,068 INFO [RS:0;6f1b912b0816:42815 {}] regionserver.HRegionServer(1031): Exiting; stopping=6f1b912b0816,42815,1733456811412; zookeeper connection closed. 
2024-12-06T03:46:53,068 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42815-0x101aa0967950001, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:46:53,068 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@50a55558 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@50a55558 2024-12-06T03:46:53,069 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T03:46:53,445 INFO [M:0;6f1b912b0816:42255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93c6d1f016c549e48bdbef4271fa59ca 2024-12-06T03:46:53,478 DEBUG [M:0;6f1b912b0816:42255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d96aada38104cc2a2a709023fabf464 is 69, key is 6f1b912b0816,42815,1733456811412/rs:state/1733456811933/Put/seqid=0 2024-12-06T03:46:53,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741839_1015 (size=5156) 2024-12-06T03:46:53,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741839_1015 (size=5156) 2024-12-06T03:46:53,483 INFO [M:0;6f1b912b0816:42255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d96aada38104cc2a2a709023fabf464 2024-12-06T03:46:53,505 DEBUG [M:0;6f1b912b0816:42255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/265d0c33e30140cbb993e0f7957bda15 is 52, key is load_balancer_on/state:d/1733456812685/Put/seqid=0 2024-12-06T03:46:53,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741840_1016 (size=5056) 2024-12-06T03:46:53,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741840_1016 (size=5056) 2024-12-06T03:46:53,511 INFO [M:0;6f1b912b0816:42255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/265d0c33e30140cbb993e0f7957bda15 2024-12-06T03:46:53,519 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/58d5e1d41bde47a28834d5b6c6984751 as 
hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/58d5e1d41bde47a28834d5b6c6984751 2024-12-06T03:46:53,527 INFO [M:0;6f1b912b0816:42255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/58d5e1d41bde47a28834d5b6c6984751, entries=8, sequenceid=29, filesize=5.5 K 2024-12-06T03:46:53,528 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93c6d1f016c549e48bdbef4271fa59ca as hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/93c6d1f016c549e48bdbef4271fa59ca 2024-12-06T03:46:53,536 INFO [M:0;6f1b912b0816:42255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/93c6d1f016c549e48bdbef4271fa59ca, entries=3, sequenceid=29, filesize=5.2 K 2024-12-06T03:46:53,538 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d96aada38104cc2a2a709023fabf464 as hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d96aada38104cc2a2a709023fabf464 2024-12-06T03:46:53,545 INFO [M:0;6f1b912b0816:42255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d96aada38104cc2a2a709023fabf464, entries=1, sequenceid=29, filesize=5.0 K 2024-12-06T03:46:53,547 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/265d0c33e30140cbb993e0f7957bda15 as hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/265d0c33e30140cbb993e0f7957bda15 2024-12-06T03:46:53,553 INFO [M:0;6f1b912b0816:42255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46535/user/jenkins/test-data/f8fb90cc-75d5-2727-15b3-0f42801bff88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/265d0c33e30140cbb993e0f7957bda15, entries=1, sequenceid=29, filesize=4.9 K 2024-12-06T03:46:53,554 INFO [M:0;6f1b912b0816:42255 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 562ms, sequenceid=29, compaction requested=false 2024-12-06T03:46:53,556 INFO [M:0;6f1b912b0816:42255 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T03:46:53,556 DEBUG [M:0;6f1b912b0816:42255 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456812992Disabling compacts and flushes for region at 1733456812992Disabling writes for close at 1733456812992Obtaining lock to block concurrent updates at 1733456812992Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733456812992Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733456812993 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733456812993Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733456812993Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733456813009 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733456813009Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733456813020 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733456813037 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733456813037Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733456813454 (+417 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733456813477 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733456813477Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733456813490 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733456813504 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733456813504Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c58c3e1: reopening flushed file at 1733456813518 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f2a55e8: reopening flushed file at 1733456813527 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bc3075d: reopening flushed file at 1733456813536 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58db2621: reopening flushed file at 1733456813545 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 562ms, sequenceid=29, compaction requested=false at 1733456813554 (+9 ms)Writing region close event to WAL at 1733456813556 (+2 ms)Closed at 1733456813556 2024-12-06T03:46:53,556 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:53,556 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:53,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:53,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:53,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:46:53,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741830_1006 (size=10311) 2024-12-06T03:46:53,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40855 is added to blk_1073741830_1006 (size=10311) 2024-12-06T03:46:53,560 INFO [M:0;6f1b912b0816:42255 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-06T03:46:53,560 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:46:53,560 INFO [M:0;6f1b912b0816:42255 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42255 2024-12-06T03:46:53,560 INFO [M:0;6f1b912b0816:42255 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:46:53,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:46:53,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:46:53,709 INFO [M:0;6f1b912b0816:42255 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:46:53,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:46:53,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42255-0x101aa0967950000, quorum=127.0.0.1:58256, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:46:53,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11ff445e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:46:53,713 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e63263c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:46:53,713 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:46:53,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4fcb1c4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:46:53,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3168153a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.log.dir/,STOPPED} 2024-12-06T03:46:53,715 WARN [BP-1453088084-172.17.0.2-1733456809335 heartbeating to localhost/127.0.0.1:46535 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:46:53,715 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:46:53,715 WARN [BP-1453088084-172.17.0.2-1733456809335 heartbeating to localhost/127.0.0.1:46535 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1453088084-172.17.0.2-1733456809335 (Datanode Uuid 63298453-577e-46b7-a99c-9bcd8d55da01) service to localhost/127.0.0.1:46535 2024-12-06T03:46:53,715 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:46:53,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/data/data3/current/BP-1453088084-172.17.0.2-1733456809335 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:46:53,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/data/data4/current/BP-1453088084-172.17.0.2-1733456809335 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:46:53,717 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:46:53,719 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e335929{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:46:53,719 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@726508a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:46:53,720 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:46:53,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33e82987{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:46:53,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@345536c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.log.dir/,STOPPED} 2024-12-06T03:46:53,721 WARN [BP-1453088084-172.17.0.2-1733456809335 heartbeating to localhost/127.0.0.1:46535 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:46:53,721 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:46:53,721 WARN [BP-1453088084-172.17.0.2-1733456809335 heartbeating to localhost/127.0.0.1:46535 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1453088084-172.17.0.2-1733456809335 (Datanode Uuid ba323b6f-5b7f-4c44-ac2b-6e864510ac6e) service to localhost/127.0.0.1:46535 2024-12-06T03:46:53,721 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:46:53,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/data/data1/current/BP-1453088084-172.17.0.2-1733456809335 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:46:53,722 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/cluster_51e59570-b740-ef12-4a7f-152af379278e/data/data2/current/BP-1453088084-172.17.0.2-1733456809335 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:46:53,722 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:46:53,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2281152e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:46:53,728 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3485277{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:46:53,728 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:46:53,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ea6e47a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:46:53,729 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43fab4bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.log.dir/,STOPPED} 2024-12-06T03:46:53,735 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T03:46:53,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T03:46:53,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.log.dir so I do NOT create it in target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6 2024-12-06T03:46:53,757 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7b08b7aa-56b1-767f-7aa2-ad972effddb0/hadoop.tmp.dir so I do NOT create it in target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5, deleteOnExit=true 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/test.cache.data in system properties and HBase conf 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir in system properties and HBase conf 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T03:46:53,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T03:46:53,758 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T03:46:53,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:46:53,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:46:53,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T03:46:53,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:46:53,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T03:46:53,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T03:46:53,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:46:53,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:46:53,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T03:46:53,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/nfs.dump.dir in system properties and HBase conf 2024-12-06T03:46:53,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/java.io.tmpdir in system properties and HBase conf 2024-12-06T03:46:53,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:46:53,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T03:46:53,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T03:46:53,772 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:46:53,957 INFO [regionserver/6f1b912b0816:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:46:54,022 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-06T03:46:54,025 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:46:54,042 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:46:54,044 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:46:54,044 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:46:54,067 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:46:54,072 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:46:54,074 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:46:54,074 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:46:54,074 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:46:54,077 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:46:54,077 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7db14741{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:46:54,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75b4bf6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:46:54,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c43bbf6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/java.io.tmpdir/jetty-localhost-36977-hadoop-hdfs-3_4_1-tests_jar-_-any-245574633453454739/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:46:54,184 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b422132{HTTP/1.1, (http/1.1)}{localhost:36977} 2024-12-06T03:46:54,184 INFO [Time-limited test {}] server.Server(415): Started @107049ms 2024-12-06T03:46:54,197 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:46:54,411 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:46:54,415 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:46:54,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:46:54,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:46:54,416 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:46:54,416 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11e71b6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:46:54,417 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1998e8d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:46:54,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ff23317{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/java.io.tmpdir/jetty-localhost-40881-hadoop-hdfs-3_4_1-tests_jar-_-any-5901544334669060626/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:46:54,528 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24306719{HTTP/1.1, (http/1.1)}{localhost:40881} 2024-12-06T03:46:54,529 INFO [Time-limited test {}] server.Server(415): Started @107394ms 2024-12-06T03:46:54,530 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:46:54,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:46:54,569 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:46:54,570 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:46:54,570 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:46:54,570 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:46:54,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cbd4eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:46:54,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a58b672{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:46:54,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74f7a593{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/java.io.tmpdir/jetty-localhost-41615-hadoop-hdfs-3_4_1-tests_jar-_-any-10371289979268966036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:46:54,676 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45b5c958{HTTP/1.1, (http/1.1)}{localhost:41615} 2024-12-06T03:46:54,676 INFO [Time-limited test {}] server.Server(415): Started @107541ms 2024-12-06T03:46:54,678 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:46:55,281 WARN [Thread-670 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data1/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:46:55,281 WARN [Thread-671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data2/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:46:55,299 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:46:55,302 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1c9f97ed8f8495e with lease ID 0x32bd57be3303706b: Processing first storage report for DS-09efbea7-e9e5-4a00-ac16-201242145f3f from datanode DatanodeRegistration(127.0.0.1:46451, datanodeUuid=ed585da6-90fd-47cd-9ba8-fa5459490f12, infoPort=34829, infoSecurePort=0, ipcPort=41371, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:46:55,302 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1c9f97ed8f8495e with lease ID 0x32bd57be3303706b: from storage DS-09efbea7-e9e5-4a00-ac16-201242145f3f node DatanodeRegistration(127.0.0.1:46451, datanodeUuid=ed585da6-90fd-47cd-9ba8-fa5459490f12, infoPort=34829, infoSecurePort=0, ipcPort=41371, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T03:46:55,302 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1c9f97ed8f8495e with lease ID 0x32bd57be3303706b: Processing first storage report for DS-78443fc0-640f-4348-b4c8-81ef90843ffb from datanode DatanodeRegistration(127.0.0.1:46451, datanodeUuid=ed585da6-90fd-47cd-9ba8-fa5459490f12, infoPort=34829, infoSecurePort=0, ipcPort=41371, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:46:55,303 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1c9f97ed8f8495e with lease ID 0x32bd57be3303706b: from storage DS-78443fc0-640f-4348-b4c8-81ef90843ffb node DatanodeRegistration(127.0.0.1:46451, datanodeUuid=ed585da6-90fd-47cd-9ba8-fa5459490f12, infoPort=34829, infoSecurePort=0, ipcPort=41371, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:46:55,395 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data3/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:46:55,399 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data4/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:46:55,422 WARN [Thread-657 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:46:55,425 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1eee0633b3cc67ce with lease ID 0x32bd57be3303706c: Processing first storage report for DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa from datanode DatanodeRegistration(127.0.0.1:34521, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=40279, infoSecurePort=0, ipcPort=45895, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:46:55,425 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1eee0633b3cc67ce with lease ID 0x32bd57be3303706c: from storage DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa node DatanodeRegistration(127.0.0.1:34521, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=40279, infoSecurePort=0, ipcPort=45895, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:46:55,425 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1eee0633b3cc67ce with lease ID 0x32bd57be3303706c: Processing first storage report for DS-724c81f0-9d70-4baf-b0ff-a86ee1ce1b4c from datanode DatanodeRegistration(127.0.0.1:34521, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=40279, infoSecurePort=0, ipcPort=45895, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:46:55,425 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1eee0633b3cc67ce with lease ID 0x32bd57be3303706c: from storage DS-724c81f0-9d70-4baf-b0ff-a86ee1ce1b4c node DatanodeRegistration(127.0.0.1:34521, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=40279, infoSecurePort=0, ipcPort=45895, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:46:55,511 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6 2024-12-06T03:46:55,514 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/zookeeper_0, clientPort=56815, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T03:46:55,515 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56815 2024-12-06T03:46:55,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:55,517 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:55,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:46:55,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:46:55,528 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726 with version=8 2024-12-06T03:46:55,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase-staging 2024-12-06T03:46:55,530 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:46:55,530 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:55,530 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:55,530 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:46:55,530 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:55,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:46:55,531 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T03:46:55,531 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:46:55,532 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44365 2024-12-06T03:46:55,533 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44365 connecting to ZooKeeper ensemble=127.0.0.1:56815 2024-12-06T03:46:55,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:443650x0, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:46:55,590 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44365-0x101aa09785e0000 connected 2024-12-06T03:46:55,659 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:55,661 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:55,664 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:46:55,664 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726, hbase.cluster.distributed=false 2024-12-06T03:46:55,666 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:46:55,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44365 2024-12-06T03:46:55,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44365 2024-12-06T03:46:55,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44365 2024-12-06T03:46:55,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44365 2024-12-06T03:46:55,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44365 2024-12-06T03:46:55,685 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:46:55,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:55,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:55,685 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:46:55,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:55,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:46:55,685 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T03:46:55,685 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:46:55,686 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37979 2024-12-06T03:46:55,688 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37979 connecting to ZooKeeper ensemble=127.0.0.1:56815 2024-12-06T03:46:55,688 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:55,691 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:55,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379790x0, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:46:55,701 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379790x0, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:46:55,701 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37979-0x101aa09785e0001 connected 2024-12-06T03:46:55,701 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T03:46:55,702 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T03:46:55,702 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T03:46:55,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:46:55,707 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37979 2024-12-06T03:46:55,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37979 2024-12-06T03:46:55,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37979 2024-12-06T03:46:55,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37979 2024-12-06T03:46:55,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37979 2024-12-06T03:46:55,728 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6f1b912b0816:44365 2024-12-06T03:46:55,728 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6f1b912b0816,44365,1733456815530 2024-12-06T03:46:55,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:46:55,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:46:55,740 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/6f1b912b0816,44365,1733456815530 2024-12-06T03:46:55,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:55,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T03:46:55,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:55,751 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T03:46:55,751 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6f1b912b0816,44365,1733456815530 from backup master directory 2024-12-06T03:46:55,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:46:55,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6f1b912b0816,44365,1733456815530 2024-12-06T03:46:55,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:46:55,759 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
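The entries above trace the test harness tearing down one minicluster and bringing a fresh one up: DFS with two datanodes, a MiniZooKeeperCluster, then the master and region server registering themselves in ZooKeeper. As a rough sketch of the driving code, assuming the public HBaseTestingUtil and StartMiniClusterOption API named in these entries (the method names are assumptions, not something this output shows):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the logged option: 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // would produce DFS/ZK/master startup entries like those above
    try {
      // test body would run against the minicluster here
    } finally {
      util.shutdownMiniCluster();    // would produce the "Minicluster is down" entry seen above
    }
  }
}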
2024-12-06T03:46:55,759 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6f1b912b0816,44365,1733456815530 2024-12-06T03:46:55,764 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/hbase.id] with ID: e16db0cb-0db5-443d-8c60-20e2dd040594 2024-12-06T03:46:55,764 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/.tmp/hbase.id 2024-12-06T03:46:55,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:46:55,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:46:55,783 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/.tmp/hbase.id]:[hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/hbase.id] 2024-12-06T03:46:55,798 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:55,798 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T03:46:55,800 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-06T03:46:55,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:55,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:55,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:46:55,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:46:55,818 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:46:55,819 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T03:46:55,819 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:46:55,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:46:55,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:46:55,830 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store 2024-12-06T03:46:55,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:46:55,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:46:55,840 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:55,840 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:46:55,840 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:55,840 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:55,840 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:46:55,840 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:46:55,840 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
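The descriptor printed above for the master local region, 'master:store', reads as an ordinary HBase schema: an in-memory 'info' family keeping 3 versions with a ROWCOL bloom filter, ROW_INDEX_V1 block encoding and 8 KB blocks, plus 'proc', 'rs' and 'state' families with default settings. A hypothetical sketch of the same schema expressed through the standard client builder API (class and method names are my assumption about the HBase client library; this is not the internal code path that creates the region):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreSchemaSketch {
  // Restates the logged schema as a descriptor object; it does not create any table.
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build())
        // The 'proc', 'rs' and 'state' families in the log use the defaults:
        // 1 version, ROW bloom filter, no block encoding, 64 KB blocks.
        .build();
  }
}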
2024-12-06T03:46:55,840 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456815840Disabling compacts and flushes for region at 1733456815840Disabling writes for close at 1733456815840Writing region close event to WAL at 1733456815840Closed at 1733456815840 2024-12-06T03:46:55,841 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/.initializing 2024-12-06T03:46:55,841 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530 2024-12-06T03:46:55,844 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C44365%2C1733456815530, suffix=, logDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530, archiveDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/oldWALs, maxLogs=10 2024-12-06T03:46:55,845 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C44365%2C1733456815530.1733456815845 2024-12-06T03:46:55,851 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 2024-12-06T03:46:55,855 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34829:34829),(127.0.0.1/127.0.0.1:40279:40279)] 2024-12-06T03:46:55,856 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:46:55,856 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:55,856 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,856 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,858 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T03:46:55,860 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:55,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:55,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T03:46:55,862 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:55,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:46:55,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T03:46:55,865 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:55,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:46:55,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T03:46:55,867 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:55,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:46:55,868 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,868 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,869 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,870 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,871 DEBUG [master/6f1b912b0816:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,871 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T03:46:55,874 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:46:55,876 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:46:55,877 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845657, jitterRate=0.07530920207500458}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T03:46:55,878 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733456815856Initializing all the Stores at 1733456815857 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456815857Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456815858 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456815858Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456815858Cleaning up temporary data from old regions at 1733456815871 (+13 ms)Region opened successfully at 1733456815878 (+7 ms) 2024-12-06T03:46:55,878 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T03:46:55,882 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a156e13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:46:55,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T03:46:55,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T03:46:55,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T03:46:55,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T03:46:55,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T03:46:55,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-06T03:46:55,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T03:46:55,886 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T03:46:55,887 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T03:46:55,909 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T03:46:55,909 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T03:46:55,910 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T03:46:55,917 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T03:46:55,918 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T03:46:55,919 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T03:46:55,925 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T03:46:55,927 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T03:46:55,934 DEBUG 
[master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T03:46:55,937 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T03:46:55,947 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T03:46:55,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:46:55,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:55,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:46:55,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:55,959 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6f1b912b0816,44365,1733456815530, sessionid=0x101aa09785e0000, setting cluster-up flag (Was=false) 2024-12-06T03:46:55,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:55,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:56,000 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T03:46:56,002 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,44365,1733456815530 2024-12-06T03:46:56,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:56,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:56,051 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T03:46:56,053 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,44365,1733456815530 2024-12-06T03:46:56,054 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T03:46:56,057 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T03:46:56,057 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T03:46:56,058 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T03:46:56,058 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6f1b912b0816,44365,1733456815530 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T03:46:56,060 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:46:56,060 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:46:56,060 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:46:56,060 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:46:56,060 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6f1b912b0816:0, corePoolSize=10, maxPoolSize=10 2024-12-06T03:46:56,060 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,060 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:46:56,060 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, 
maxPoolSize=1 2024-12-06T03:46:56,061 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733456846061 2024-12-06T03:46:56,061 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T03:46:56,061 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T03:46:56,061 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T03:46:56,061 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T03:46:56,062 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T03:46:56,062 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T03:46:56,062 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,062 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T03:46:56,063 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T03:46:56,063 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T03:46:56,063 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:46:56,063 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T03:46:56,063 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T03:46:56,063 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T03:46:56,063 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456816063,5,FailOnTimeoutGroup] 2024-12-06T03:46:56,064 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456816063,5,FailOnTimeoutGroup] 2024-12-06T03:46:56,064 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,064 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T03:46:56,064 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,064 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,064 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,064 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T03:46:56,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:46:56,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:46:56,116 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(746): ClusterId : e16db0cb-0db5-443d-8c60-20e2dd040594 2024-12-06T03:46:56,116 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T03:46:56,149 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T03:46:56,149 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T03:46:56,160 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T03:46:56,161 DEBUG [RS:0;6f1b912b0816:37979 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18f78161, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:46:56,177 DEBUG [RS:0;6f1b912b0816:37979 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6f1b912b0816:37979 2024-12-06T03:46:56,177 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T03:46:56,177 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T03:46:56,177 DEBUG [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T03:46:56,178 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,44365,1733456815530 with port=37979, startcode=1733456815684 2024-12-06T03:46:56,178 DEBUG [RS:0;6f1b912b0816:37979 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T03:46:56,180 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33385, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T03:46:56,181 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44365 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,181 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44365 {}] master.ServerManager(517): Registering regionserver=6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,183 DEBUG [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726 2024-12-06T03:46:56,183 DEBUG [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46387 2024-12-06T03:46:56,183 DEBUG [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T03:46:56,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:46:56,193 DEBUG [RS:0;6f1b912b0816:37979 {}] zookeeper.ZKUtil(111): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,193 WARN [RS:0;6f1b912b0816:37979 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
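Note: the registration traffic above runs against the ZooKeeper quorum 127.0.0.1:56815 with baseZNode=/hbase. For orientation, a client of this mini-cluster would reach it through the same quorum and parent znode. The following is only an illustrative sketch using the standard HBase client API and the values reported in the log; it is not part of the test code that produced this output.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values taken from the log: quorum=127.0.0.1:56815, baseZNode=/hbase.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 56815);
    conf.set("zookeeper.znode.parent", "/hbase");
    // The connection resolves the hbase:meta location from ZooKeeper, i.e. the
    // /hbase/meta-region-server znode that is populated later in this log.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("Cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```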
2024-12-06T03:46:56,193 INFO [RS:0;6f1b912b0816:37979 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:46:56,193 DEBUG [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,193 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6f1b912b0816,37979,1733456815684] 2024-12-06T03:46:56,197 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T03:46:56,200 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T03:46:56,200 INFO [RS:0;6f1b912b0816:37979 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:46:56,200 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,201 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T03:46:56,202 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T03:46:56,202 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,202 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,202 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,202 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,202 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,202 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,202 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:46:56,203 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,203 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,203 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6f1b912b0816:0, corePoolSize=1, 
maxPoolSize=1 2024-12-06T03:46:56,203 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,203 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,203 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:56,203 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:46:56,203 DEBUG [RS:0;6f1b912b0816:37979 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:46:56,204 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,204 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,204 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,204 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,204 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,204 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,37979,1733456815684-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:46:56,220 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T03:46:56,220 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,37979,1733456815684-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,220 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:56,220 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.Replication(171): 6f1b912b0816,37979,1733456815684 started 2024-12-06T03:46:56,235 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:56,235 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1482): Serving as 6f1b912b0816,37979,1733456815684, RpcServer on 6f1b912b0816/172.17.0.2:37979, sessionid=0x101aa09785e0001 2024-12-06T03:46:56,235 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T03:46:56,235 DEBUG [RS:0;6f1b912b0816:37979 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,235 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,37979,1733456815684' 2024-12-06T03:46:56,235 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T03:46:56,236 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T03:46:56,237 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T03:46:56,237 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T03:46:56,237 DEBUG [RS:0;6f1b912b0816:37979 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,237 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,37979,1733456815684' 2024-12-06T03:46:56,237 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T03:46:56,237 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T03:46:56,237 DEBUG [RS:0;6f1b912b0816:37979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T03:46:56,238 INFO [RS:0;6f1b912b0816:37979 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T03:46:56,238 INFO [RS:0;6f1b912b0816:37979 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-06T03:46:56,341 INFO [RS:0;6f1b912b0816:37979 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C37979%2C1733456815684, suffix=, logDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684, archiveDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs, maxLogs=32 2024-12-06T03:46:56,343 INFO [RS:0;6f1b912b0816:37979 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37979%2C1733456815684.1733456816343 2024-12-06T03:46:56,352 INFO [RS:0;6f1b912b0816:37979 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 2024-12-06T03:46:56,355 DEBUG [RS:0;6f1b912b0816:37979 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40279:40279),(127.0.0.1/127.0.0.1:34829:34829)] 2024-12-06T03:46:56,475 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T03:46:56,476 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726 2024-12-06T03:46:56,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741833_1009 (size=32) 2024-12-06T03:46:56,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741833_1009 (size=32) 2024-12-06T03:46:56,488 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:56,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:46:56,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:46:56,491 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:56,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:46:56,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:46:56,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:56,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:46:56,496 
INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:46:56,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:56,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:46:56,498 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:46:56,498 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:56,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:46:56,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740 2024-12-06T03:46:56,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740 2024-12-06T03:46:56,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:46:56,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 
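For reference, the hbase:meta descriptor dumped above spells out per-family attributes (for the info family: VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8192). The same attributes can be expressed for an ordinary table through the public descriptor builders; the sketch below is illustrative only, with placeholder table and family names, and is not how hbase:meta itself is created (InitMetaProcedure does that internally, as the BOOTSTRAP lines show).

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes printed in the log:
    // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
    // "example_table" is a placeholder; hbase:meta is never created this way.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();
  }
}
```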
2024-12-06T03:46:56,502 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:46:56,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:46:56,506 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:46:56,507 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708808, jitterRate=-0.09870520234107971}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:46:56,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733456816488Initializing all the Stores at 1733456816489 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456816489Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456816489Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456816489Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456816489Cleaning up temporary data from old regions at 1733456816502 (+13 ms)Region opened successfully at 1733456816508 (+6 ms) 2024-12-06T03:46:56,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:46:56,508 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:46:56,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:46:56,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:46:56,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:46:56,508 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
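The FlushLargeStoresPolicy message above describes the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: divide the region's memstore flush heap size by its number of column families. The numbers in this log fit that rule: 33554432 (32 MB) for the master local store, which has four families (info, proc, rs, state) and flushSize=134217728, and 16777216 (16 MB) here for hbase:meta, which also has four families (info, ns, rep_barrier, table), implying a 64 MB flush size for this region in the test. The snippet below only sketches that arithmetic; it is not HBase's implementation.

```java
public class FlushLowerBoundSketch {
  // Sketch of the fallback the log describes: when
  // hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, use
  // (region memstore flush size) / (number of column families).
  static long lowerBound(long memstoreFlushSize, int numFamilies) {
    return memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // Master local store: 128 MB flush size, 4 families (info, proc, rs, state)
    // -> 33554432 (32 MB), matching flushSizeLowerBound in the log.
    System.out.println(lowerBound(134_217_728L, 4));
    // hbase:meta: flushSizeLowerBound=16777216 (16 MB) with 4 families
    // (info, ns, rep_barrier, table) implies a 64 MB flush size in this test.
    System.out.println(lowerBound(67_108_864L, 4));
  }
}
```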
2024-12-06T03:46:56,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456816508Disabling compacts and flushes for region at 1733456816508Disabling writes for close at 1733456816508Writing region close event to WAL at 1733456816508Closed at 1733456816508 2024-12-06T03:46:56,510 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:46:56,510 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T03:46:56,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T03:46:56,512 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:46:56,513 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T03:46:56,663 DEBUG [6f1b912b0816:44365 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T03:46:56,664 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,666 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,37979,1733456815684, state=OPENING 2024-12-06T03:46:56,709 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T03:46:56,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:56,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:46:56,718 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:46:56,718 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:46:56,718 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:46:56,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,37979,1733456815684}] 2024-12-06T03:46:56,873 DEBUG [RSProcedureDispatcher-pool-0 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T03:46:56,876 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40127, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T03:46:56,881 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T03:46:56,882 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:46:56,884 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C37979%2C1733456815684.meta, suffix=.meta, logDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684, archiveDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs, maxLogs=32 2024-12-06T03:46:56,885 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta 2024-12-06T03:46:56,896 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta 2024-12-06T03:46:56,916 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40279:40279),(127.0.0.1/127.0.0.1:34829:34829)] 2024-12-06T03:46:56,918 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:46:56,918 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T03:46:56,918 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T03:46:56,918 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
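The two "WAL configuration" records in this log (blocksize=256 MB, rollsize=128 MB, maxLogs=32) cover the region server's default WAL and the meta WAL it is about to create. The sketch below names the standard configuration keys that govern those numbers; treating them as the source of these particular values is an assumption, since the test may simply be running with defaults derived from the local filesystem block size.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSizingSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size; the log reports 256 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll the WAL once it reaches blocksize * multiplier
    // (256 MB * 0.5 = 128 MB, the rollsize printed in the log).
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on retained WAL files before flushes are forced (maxLogs=32).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}
```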
2024-12-06T03:46:56,919 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T03:46:56,919 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:56,919 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T03:46:56,919 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T03:46:56,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:46:56,921 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:46:56,921 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:56,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:46:56,923 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:46:56,923 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:56,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:46:56,925 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:46:56,925 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:46:56,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:46:56,926 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:46:56,926 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:56,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
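Every store opened in this log prints the same CompactionConfiguration: minCompactSize 128 MB, files [3, 10), ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, major period 604800000 ms with 0.5 jitter. These match HBase's stock defaults; the snippet below lists the standard keys behind the main fields, as an illustration of where the values come from rather than anything this test explicitly sets.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionDefaultsSketch {
  public static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    // Keys behind the CompactionConfiguration fields printed in the log
    // (values shown are the defaults the log reports).
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);   // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
    return conf;
  }
}
```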
2024-12-06T03:46:56,927 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:46:56,928 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740 2024-12-06T03:46:56,929 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740 2024-12-06T03:46:56,931 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:46:56,931 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:46:56,931 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:46:56,933 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:46:56,934 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805056, jitterRate=0.023681730031967163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:46:56,934 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T03:46:56,935 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733456816919Writing region info on filesystem at 1733456816919Initializing all the Stores at 1733456816920 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456816920Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456816920Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456816920Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456816920Cleaning up temporary data from old regions at 1733456816931 (+11 ms)Running coprocessor post-open hooks at 1733456816934 (+3 ms)Region opened successfully at 1733456816935 (+1 ms) 2024-12-06T03:46:56,936 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733456816873 2024-12-06T03:46:56,939 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T03:46:56,939 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T03:46:56,940 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,941 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,37979,1733456815684, state=OPEN 2024-12-06T03:46:56,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:46:56,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:46:56,991 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6f1b912b0816,37979,1733456815684 2024-12-06T03:46:56,991 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:46:56,991 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:46:56,995 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T03:46:56,995 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,37979,1733456815684 in 273 msec 2024-12-06T03:46:56,999 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T03:46:56,999 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 485 msec 2024-12-06T03:46:57,000 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:46:57,000 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T03:46:57,002 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:46:57,002 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,37979,1733456815684, seqNum=-1] 2024-12-06T03:46:57,002 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:46:57,004 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46621, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:46:57,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 953 msec 2024-12-06T03:46:57,011 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733456817011, completionTime=-1 2024-12-06T03:46:57,011 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T03:46:57,011 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-06T03:46:57,014 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-06T03:46:57,014 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733456877014 2024-12-06T03:46:57,014 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733456937014 2024-12-06T03:46:57,014 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-06T03:46:57,015 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44365,1733456815530-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,015 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44365,1733456815530-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,015 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44365,1733456815530-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,015 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6f1b912b0816:44365, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:57,015 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,017 DEBUG [master/6f1b912b0816:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T03:46:57,023 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,027 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.268sec 2024-12-06T03:46:57,027 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T03:46:57,027 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T03:46:57,027 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T03:46:57,027 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T03:46:57,027 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T03:46:57,028 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44365,1733456815530-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:46:57,028 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44365,1733456815530-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T03:46:57,030 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T03:46:57,030 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T03:46:57,031 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44365,1733456815530-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:57,117 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5456168, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:46:57,117 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6f1b912b0816,44365,-1 for getting cluster id 2024-12-06T03:46:57,117 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T03:46:57,121 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e16db0cb-0db5-443d-8c60-20e2dd040594' 2024-12-06T03:46:57,121 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T03:46:57,122 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e16db0cb-0db5-443d-8c60-20e2dd040594" 2024-12-06T03:46:57,122 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72207722, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:46:57,122 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6f1b912b0816,44365,-1] 2024-12-06T03:46:57,122 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T03:46:57,127 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:46:57,129 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50940, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-06T03:46:57,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@636405cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:46:57,131 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:46:57,132 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,37979,1733456815684, seqNum=-1] 2024-12-06T03:46:57,132 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:46:57,134 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:46:57,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6f1b912b0816,44365,1733456815530 2024-12-06T03:46:57,136 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:57,139 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-06T03:46:57,153 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:46:57,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:57,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:57,154 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:46:57,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:46:57,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:46:57,154 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T03:46:57,154 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:46:57,155 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37887 2024-12-06T03:46:57,156 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37887 connecting to ZooKeeper ensemble=127.0.0.1:56815 2024-12-06T03:46:57,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:57,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:46:57,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378870x0, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:46:57,193 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:378870x0, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-06T03:46:57,193 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-06T03:46:57,193 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37887-0x101aa09785e0002 connected 2024-12-06T03:46:57,194 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T03:46:57,195 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T03:46:57,195 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T03:46:57,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:46:57,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37887 2024-12-06T03:46:57,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37887 2024-12-06T03:46:57,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37887 2024-12-06T03:46:57,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37887 2024-12-06T03:46:57,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37887 2024-12-06T03:46:57,202 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(746): ClusterId : e16db0cb-0db5-443d-8c60-20e2dd040594 2024-12-06T03:46:57,202 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T03:46:57,209 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T03:46:57,209 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T03:46:57,218 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T03:46:57,219 DEBUG [RS:1;6f1b912b0816:37887 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6401dd96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:46:57,231 DEBUG [RS:1;6f1b912b0816:37887 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;6f1b912b0816:37887 2024-12-06T03:46:57,231 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T03:46:57,231 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T03:46:57,231 DEBUG [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-06T03:46:57,232 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,44365,1733456815530 with port=37887, startcode=1733456817153 2024-12-06T03:46:57,232 DEBUG [RS:1;6f1b912b0816:37887 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T03:46:57,233 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40053, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T03:46:57,234 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44365 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6f1b912b0816,37887,1733456817153 2024-12-06T03:46:57,234 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44365 {}] master.ServerManager(517): Registering regionserver=6f1b912b0816,37887,1733456817153 2024-12-06T03:46:57,235 DEBUG [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726 2024-12-06T03:46:57,236 DEBUG [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46387 2024-12-06T03:46:57,236 DEBUG [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T03:46:57,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:46:57,242 DEBUG [RS:1;6f1b912b0816:37887 {}] zookeeper.ZKUtil(111): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6f1b912b0816,37887,1733456817153 2024-12-06T03:46:57,242 WARN [RS:1;6f1b912b0816:37887 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:46:57,242 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6f1b912b0816,37887,1733456817153] 2024-12-06T03:46:57,242 INFO [RS:1;6f1b912b0816:37887 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:46:57,243 DEBUG [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153 2024-12-06T03:46:57,246 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T03:46:57,248 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T03:46:57,249 INFO [RS:1;6f1b912b0816:37887 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:46:57,249 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:57,250 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T03:46:57,251 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T03:46:57,251 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,251 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,251 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,251 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:46:57,252 DEBUG [RS:1;6f1b912b0816:37887 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:46:57,252 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-06T03:46:57,252 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,253 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,253 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,253 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,253 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,37887,1733456817153-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:46:57,268 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T03:46:57,268 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,37887,1733456817153-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,268 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,268 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.Replication(171): 6f1b912b0816,37887,1733456817153 started 2024-12-06T03:46:57,282 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:46:57,282 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(1482): Serving as 6f1b912b0816,37887,1733456817153, RpcServer on 6f1b912b0816/172.17.0.2:37887, sessionid=0x101aa09785e0002 2024-12-06T03:46:57,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;6f1b912b0816:37887,5,FailOnTimeoutGroup] 2024-12-06T03:46:57,282 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T03:46:57,282 DEBUG [RS:1;6f1b912b0816:37887 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6f1b912b0816,37887,1733456817153 2024-12-06T03:46:57,282 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,37887,1733456817153' 2024-12-06T03:46:57,282 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T03:46:57,283 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-06T03:46:57,283 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-06T03:46:57,283 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T03:46:57,283 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T03:46:57,283 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T03:46:57,283 DEBUG [RS:1;6f1b912b0816:37887 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
6f1b912b0816,37887,1733456817153 2024-12-06T03:46:57,283 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,37887,1733456817153' 2024-12-06T03:46:57,283 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T03:46:57,284 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T03:46:57,284 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 6f1b912b0816,44365,1733456815530 2024-12-06T03:46:57,284 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5c54ce47 2024-12-06T03:46:57,284 DEBUG [RS:1;6f1b912b0816:37887 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T03:46:57,284 INFO [RS:1;6f1b912b0816:37887 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T03:46:57,284 INFO [RS:1;6f1b912b0816:37887 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T03:46:57,284 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T03:46:57,286 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T03:46:57,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44365 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T03:46:57,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44365 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-06T03:46:57,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44365 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:46:57,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44365 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T03:46:57,290 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T03:46:57,290 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:57,290 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44365 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-06T03:46:57,291 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T03:46:57,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44365 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T03:46:57,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741835_1011 (size=393) 2024-12-06T03:46:57,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741835_1011 (size=393) 2024-12-06T03:46:57,300 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => be1f405d2ef6b6215b6bdbbe6eeb9550, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726 2024-12-06T03:46:57,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46451 is added to blk_1073741836_1012 (size=76) 2024-12-06T03:46:57,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34521 is added to blk_1073741836_1012 (size=76) 2024-12-06T03:46:57,308 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:57,308 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing be1f405d2ef6b6215b6bdbbe6eeb9550, disabling compactions & flushes 2024-12-06T03:46:57,308 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:46:57,309 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:46:57,309 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. after waiting 0 ms 2024-12-06T03:46:57,309 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:46:57,309 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:46:57,309 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for be1f405d2ef6b6215b6bdbbe6eeb9550: Waiting for close lock at 1733456817308Disabling compacts and flushes for region at 1733456817308Disabling writes for close at 1733456817309 (+1 ms)Writing region close event to WAL at 1733456817309Closed at 1733456817309 2024-12-06T03:46:57,310 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T03:46:57,311 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733456817311"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733456817311"}]},"ts":"1733456817311"} 2024-12-06T03:46:57,314 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-06T03:46:57,315 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T03:46:57,316 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456817315"}]},"ts":"1733456817315"} 2024-12-06T03:46:57,318 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-06T03:46:57,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=be1f405d2ef6b6215b6bdbbe6eeb9550, ASSIGN}] 2024-12-06T03:46:57,321 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=be1f405d2ef6b6215b6bdbbe6eeb9550, ASSIGN 2024-12-06T03:46:57,323 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=be1f405d2ef6b6215b6bdbbe6eeb9550, ASSIGN; state=OFFLINE, location=6f1b912b0816,37979,1733456815684; forceNewPlan=false, retain=false 2024-12-06T03:46:57,387 INFO [RS:1;6f1b912b0816:37887 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C37887%2C1733456817153, suffix=, logDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153, archiveDir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs, maxLogs=32 2024-12-06T03:46:57,388 INFO [RS:1;6f1b912b0816:37887 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37887%2C1733456817153.1733456817388 2024-12-06T03:46:57,397 INFO [RS:1;6f1b912b0816:37887 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 2024-12-06T03:46:57,399 DEBUG [RS:1;6f1b912b0816:37887 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34829:34829),(127.0.0.1/127.0.0.1:40279:40279)] 2024-12-06T03:46:57,474 INFO [6f1b912b0816:44365 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-06T03:46:57,474 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=be1f405d2ef6b6215b6bdbbe6eeb9550, regionState=OPENING, regionLocation=6f1b912b0816,37979,1733456815684 2024-12-06T03:46:57,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=be1f405d2ef6b6215b6bdbbe6eeb9550, ASSIGN because future has completed 2024-12-06T03:46:57,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure be1f405d2ef6b6215b6bdbbe6eeb9550, server=6f1b912b0816,37979,1733456815684}] 2024-12-06T03:46:57,639 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:46:57,640 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => be1f405d2ef6b6215b6bdbbe6eeb9550, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:46:57,641 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,641 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:46:57,641 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,641 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,643 INFO [StoreOpener-be1f405d2ef6b6215b6bdbbe6eeb9550-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,645 INFO [StoreOpener-be1f405d2ef6b6215b6bdbbe6eeb9550-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be1f405d2ef6b6215b6bdbbe6eeb9550 columnFamilyName info 2024-12-06T03:46:57,645 DEBUG [StoreOpener-be1f405d2ef6b6215b6bdbbe6eeb9550-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:46:57,645 INFO [StoreOpener-be1f405d2ef6b6215b6bdbbe6eeb9550-1 {}] regionserver.HStore(327): Store=be1f405d2ef6b6215b6bdbbe6eeb9550/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:46:57,645 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,646 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,647 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,647 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,647 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,649 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,651 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:46:57,652 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened be1f405d2ef6b6215b6bdbbe6eeb9550; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751488, jitterRate=-0.04443460702896118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T03:46:57,652 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:46:57,653 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for be1f405d2ef6b6215b6bdbbe6eeb9550: Running coprocessor pre-open hook at 1733456817641Writing region info on filesystem at 1733456817641Initializing all the Stores at 1733456817643 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456817643Cleaning up temporary data from old regions at 1733456817647 (+4 ms)Running coprocessor post-open hooks at 1733456817652 (+5 ms)Region opened successfully at 1733456817653 (+1 ms) 2024-12-06T03:46:57,654 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550., pid=6, masterSystemTime=1733456817633 2024-12-06T03:46:57,657 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:46:57,657 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:46:57,658 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=be1f405d2ef6b6215b6bdbbe6eeb9550, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,37979,1733456815684 2024-12-06T03:46:57,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure be1f405d2ef6b6215b6bdbbe6eeb9550, server=6f1b912b0816,37979,1733456815684 because future has completed 2024-12-06T03:46:57,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T03:46:57,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure be1f405d2ef6b6215b6bdbbe6eeb9550, server=6f1b912b0816,37979,1733456815684 in 183 msec 2024-12-06T03:46:57,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T03:46:57,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=be1f405d2ef6b6215b6bdbbe6eeb9550, ASSIGN in 347 msec 2024-12-06T03:46:57,670 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T03:46:57,671 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456817670"}]},"ts":"1733456817670"} 2024-12-06T03:46:57,674 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-06T03:46:57,675 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T03:46:57,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 388 msec 2024-12-06T03:47:02,423 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T03:47:02,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:02,448 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:02,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:02,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:02,461 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-06T03:47:02,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:47:02,975 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-06T03:47:02,976 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T03:47:02,976 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-06T03:47:02,977 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:47:02,977 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-06T03:47:07,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44365 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T03:47:07,372 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-06T03:47:07,372 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-06T03:47:07,376 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T03:47:07,376 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:47:07,392 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:07,396 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:07,397 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:07,397 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:07,397 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:47:07,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62980799{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:07,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7461e1e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:07,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f0dafb5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/java.io.tmpdir/jetty-localhost-44991-hadoop-hdfs-3_4_1-tests_jar-_-any-13797455285982614095/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:07,504 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3727c2a2{HTTP/1.1, (http/1.1)}{localhost:44991} 2024-12-06T03:47:07,504 INFO [Time-limited test {}] server.Server(415): Started @120369ms 2024-12-06T03:47:07,505 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:47:07,543 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:07,546 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:07,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:07,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:07,547 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:47:07,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4175b329{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:07,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b8dc18b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:07,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52162895{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/java.io.tmpdir/jetty-localhost-45351-hadoop-hdfs-3_4_1-tests_jar-_-any-11680195602895221083/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:07,652 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43081444{HTTP/1.1, (http/1.1)}{localhost:45351} 2024-12-06T03:47:07,652 INFO [Time-limited test {}] server.Server(415): Started @120517ms 2024-12-06T03:47:07,654 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:47:07,685 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:07,688 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:07,689 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:07,689 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:07,689 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:47:07,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f70e325{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:07,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@740bf9ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:07,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a993d46{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/java.io.tmpdir/jetty-localhost-46685-hadoop-hdfs-3_4_1-tests_jar-_-any-279683950009951647/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:07,797 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ae47adb{HTTP/1.1, (http/1.1)}{localhost:46685} 2024-12-06T03:47:07,797 INFO [Time-limited test {}] server.Server(415): Started @120662ms 2024-12-06T03:47:07,798 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:47:08,892 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data5/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:08,892 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data6/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:08,907 WARN [Thread-806 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:47:08,909 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x13cbe729f0428af4 with lease ID 0x32bd57be3303706d: Processing first storage report for DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c from datanode DatanodeRegistration(127.0.0.1:42543, datanodeUuid=861efda2-5ae0-42b0-975f-fd2b6051795a, infoPort=34227, infoSecurePort=0, ipcPort=38501, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:47:08,909 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13cbe729f0428af4 with lease ID 0x32bd57be3303706d: from storage DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c node DatanodeRegistration(127.0.0.1:42543, datanodeUuid=861efda2-5ae0-42b0-975f-fd2b6051795a, infoPort=34227, infoSecurePort=0, ipcPort=38501, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:08,909 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x13cbe729f0428af4 with lease ID 0x32bd57be3303706d: Processing first storage report for DS-416dbaa2-6d4b-4626-b5b6-3f9f0d8bb3d1 from datanode DatanodeRegistration(127.0.0.1:42543, datanodeUuid=861efda2-5ae0-42b0-975f-fd2b6051795a, infoPort=34227, infoSecurePort=0, ipcPort=38501, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:47:08,909 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13cbe729f0428af4 with lease ID 0x32bd57be3303706d: from storage DS-416dbaa2-6d4b-4626-b5b6-3f9f0d8bb3d1 node DatanodeRegistration(127.0.0.1:42543, datanodeUuid=861efda2-5ae0-42b0-975f-fd2b6051795a, infoPort=34227, infoSecurePort=0, ipcPort=38501, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T03:47:08,984 WARN [Thread-876 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data7/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:08,997 WARN [Thread-877 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data8/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:09,013 WARN [Thread-828 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:47:09,015 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x354fa3bc5c044961 with lease ID 0x32bd57be3303706e: Processing first storage report for DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c from datanode DatanodeRegistration(127.0.0.1:36403, datanodeUuid=4e3cd462-b45d-40a1-b7ef-68f35b0933cc, infoPort=39653, infoSecurePort=0, ipcPort=41171, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:47:09,015 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x354fa3bc5c044961 with lease ID 0x32bd57be3303706e: from storage DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c node DatanodeRegistration(127.0.0.1:36403, datanodeUuid=4e3cd462-b45d-40a1-b7ef-68f35b0933cc, infoPort=39653, infoSecurePort=0, ipcPort=41171, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T03:47:09,015 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x354fa3bc5c044961 with lease ID 0x32bd57be3303706e: Processing first storage report for DS-4c8b0307-ccc1-4db3-a24d-a8a4cc99647d from datanode DatanodeRegistration(127.0.0.1:36403, datanodeUuid=4e3cd462-b45d-40a1-b7ef-68f35b0933cc, infoPort=39653, infoSecurePort=0, ipcPort=41171, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:47:09,016 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x354fa3bc5c044961 with lease ID 0x32bd57be3303706e: from storage DS-4c8b0307-ccc1-4db3-a24d-a8a4cc99647d node DatanodeRegistration(127.0.0.1:36403, datanodeUuid=4e3cd462-b45d-40a1-b7ef-68f35b0933cc, infoPort=39653, infoSecurePort=0, ipcPort=41171, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:09,179 WARN [Thread-887 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:09,179 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10/current/BP-1883769982-172.17.0.2-1733456813785/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:09,197 WARN [Thread-850 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:47:09,199 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdfdd539f57c671ba with lease ID 0x32bd57be3303706f: Processing first storage report for DS-00f967fa-f0d1-416d-ba25-6509174d04b1 from datanode DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:47:09,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdfdd539f57c671ba with lease ID 0x32bd57be3303706f: from storage DS-00f967fa-f0d1-416d-ba25-6509174d04b1 node DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T03:47:09,199 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdfdd539f57c671ba with lease ID 0x32bd57be3303706f: Processing first storage report for DS-0bb69223-f0be-4096-a99f-35f6fedc62d4 from datanode DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785) 2024-12-06T03:47:09,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdfdd539f57c671ba with lease ID 0x32bd57be3303706f: from storage DS-0bb69223-f0be-4096-a99f-35f6fedc62d4 node DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:09,230 WARN [ResponseProcessor for block BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,230 WARN [ResponseProcessor for block BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:09,230 WARN [ResponseProcessor for block BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,231 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta block BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:09,231 WARN [ResponseProcessor for block BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,231 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 block BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:09,231 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 block BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 
2024-12-06T03:47:09,231 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 block BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:09,231 WARN [PacketResponder: BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34521] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:09,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74f7a593{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:09,233 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45b5c958{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:09,233 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:09,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:33250 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:34521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33250 dst: /127.0.0.1:34521 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:09,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a58b672{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:09,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cbd4eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:09,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-450925094_22 at /127.0.0.1:52066 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52066 dst: /127.0.0.1:46451 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:09,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:52038 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52038 dst: /127.0.0.1:46451 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:09,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:52016 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:46451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52016 dst: /127.0.0.1:46451 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:09,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1191422372_22 at /127.0.0.1:51990 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51990 dst: /127.0.0.1:46451 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:09,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1191422372_22 at /127.0.0.1:33240 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33240 dst: /127.0.0.1:34521 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:09,233 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:33266 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33266 dst: /127.0.0.1:34521 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:09,235 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-450925094_22 at /127.0.0.1:33290 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33290 dst: /127.0.0.1:34521 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:09,235 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:47:09,235 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1883769982-172.17.0.2-1733456813785 (Datanode Uuid b55de3f7-2a18-4d64-98a6-55c777f5975c) service to localhost/127.0.0.1:46387 2024-12-06T03:47:09,236 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T03:47:09,236 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:47:09,237 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data4/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:09,237 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data3/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:09,237 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:47:09,240 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ff23317{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:09,240 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24306719{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:09,240 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:09,240 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 block BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,240 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1998e8d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:09,240 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta block BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11e71b6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:09,240 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 block BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,240 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 block BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,243 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:47:09,243 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:47:09,243 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1883769982-172.17.0.2-1733456813785 (Datanode Uuid ed585da6-90fd-47cd-9ba8-fa5459490f12) service to localhost/127.0.0.1:46387 2024-12-06T03:47:09,243 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:47:09,244 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data1/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:09,244 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data2/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:09,244 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:47:09,248 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550., hostname=6f1b912b0816,37979,1733456815684, seqNum=2] 2024-12-06T03:47:09,250 ERROR [FSHLog-0-hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726-prefix:6f1b912b0816,37979,1733456815684 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,250 WARN [FSHLog-0-hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726-prefix:6f1b912b0816,37979,1733456815684 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,250 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,250 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C37979%2C1733456815684:(num 1733456816343) roll requested 2024-12-06T03:47:09,251 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37979%2C1733456815684.1733456829250 2024-12-06T03:47:09,253 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,258 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:09,258 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:09,258 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:09,258 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:09,258 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:09,258 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456829250 2024-12-06T03:47:09,259 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,259 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:09,260 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-06T03:47:09,260 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-06T03:47:09,261 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 2024-12-06T03:47:09,263 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34227:34227),(127.0.0.1/127.0.0.1:39653:39653)] 2024-12-06T03:47:09,263 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 is not closed yet, will try archiving it next time 2024-12-06T03:47:09,264 WARN [IPC Server handler 3 on default port 46387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741832_1008 2024-12-06T03:47:09,269 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 after 5ms 2024-12-06T03:47:09,821 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:11,253 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:11,263 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:11,265 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456829250 2024-12-06T03:47:11,265 WARN [ResponseProcessor for block BP-1883769982-172.17.0.2-1733456813785:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1883769982-172.17.0.2-1733456813785:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:11,265 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456829250 block BP-1883769982-172.17.0.2-1733456813785:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:11,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:52396 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:42543:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52396 dst: /127.0.0.1:42543 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:11,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:59282 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36403:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59282 dst: /127.0.0.1:36403 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:11,267 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f0dafb5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:11,268 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3727c2a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:11,268 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:11,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7461e1e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:11,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62980799{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:11,270 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:47:11,270 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:47:11,270 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1883769982-172.17.0.2-1733456813785 (Datanode Uuid 861efda2-5ae0-42b0-975f-fd2b6051795a) service to localhost/127.0.0.1:46387 2024-12-06T03:47:11,270 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:47:11,271 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data5/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:11,271 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data6/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:11,271 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:47:11,821 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:13,254 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:13,264 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:13,264 WARN [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]] 2024-12-06T03:47:13,264 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C37979%2C1733456815684:(num 1733456829250) roll requested 2024-12-06T03:47:13,265 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37979%2C1733456815684.1733456833264 2024-12-06T03:47:13,268 WARN [Thread-907 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:13,268 WARN [Thread-907 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 
2024-12-06T03:47:13,268 WARN [Thread-907 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741839_1021 2024-12-06T03:47:13,270 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 after 4009ms 2024-12-06T03:47:13,271 WARN [Thread-907 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:13,276 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T03:47:13,284 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:13,284 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:13,285 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:13,285 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:13,285 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:13,285 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456829250 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456833264 2024-12-06T03:47:13,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36403 is added to blk_1073741838_1020 (size=3600) 2024-12-06T03:47:13,288 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 is not closed yet, will try archiving it next time 2024-12-06T03:47:13,289 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46431:46431),(127.0.0.1/127.0.0.1:39653:39653)] 2024-12-06T03:47:13,289 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 is not closed yet, will try archiving it next time 2024-12-06T03:47:13,821 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:15,254 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,280 WARN [ResponseProcessor for block BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1022 java.io.IOException: Bad response ERROR for BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1022 from datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,280 WARN [DataStreamer for file /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456833264 block BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:15,280 WARN [PacketResponder: BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1022, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36403] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35320 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35320 dst: /127.0.0.1:45127 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:43400 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:36403:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43400 dst: /127.0.0.1:36403 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,289 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,289 WARN [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]] 2024-12-06T03:47:15,289 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C37979%2C1733456815684:(num 1733456833264) roll requested 2024-12-06T03:47:15,290 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37979%2C1733456815684.1733456835289 2024-12-06T03:47:15,292 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:15,293 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:15,293 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741841_1024 2024-12-06T03:47:15,293 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] 2024-12-06T03:47:15,294 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,295 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:15,295 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741842_1025 2024-12-06T03:47:15,295 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:15,297 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36403 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:15,297 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35344 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10]'}, localName='127.0.0.1:45127', datanodeUuid='e696cafe-43c2-49d4-a640-d382b189f5dd', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741843_1026 to mirror 127.0.0.1:36403 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,298 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:15,298 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741843_1026 2024-12-06T03:47:15,298 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35344 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T03:47:15,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35344 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35344 dst: /127.0.0.1:45127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,298 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:15,299 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,300 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]) is bad. 
2024-12-06T03:47:15,300 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741844_1027 2024-12-06T03:47:15,300 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK] 2024-12-06T03:47:15,301 WARN [IPC Server handler 3 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T03:47:15,301 WARN [IPC Server handler 3 on default port 46387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T03:47:15,301 WARN [IPC Server handler 3 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T03:47:15,303 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:15,303 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:15,303 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:15,304 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:15,304 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:15,304 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456833264 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456835289 2024-12-06T03:47:15,304 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46431:46431)] 2024-12-06T03:47:15,305 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 is not closed yet, will try archiving it next time 2024-12-06T03:47:15,305 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456833264 is not closed yet, will try archiving it next time 2024-12-06T03:47:15,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to 
blk_1073741840_1023 (size=93) 2024-12-06T03:47:15,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52162895{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:15,313 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43081444{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:15,313 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:15,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b8dc18b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:15,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4175b329{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:15,314 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:47:15,314 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T03:47:15,314 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1883769982-172.17.0.2-1733456813785 (Datanode Uuid 4e3cd462-b45d-40a1-b7ef-68f35b0933cc) service to localhost/127.0.0.1:46387 2024-12-06T03:47:15,314 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:47:15,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data7/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:15,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data8/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:15,315 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:47:15,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] regionserver.HRegion(8855): Flush requested on be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:47:15,323 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing be1f405d2ef6b6215b6bdbbe6eeb9550 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:47:15,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/71748fe632834110ae0d9fc75c5ea59d is 1080, key is row0002/info:/1733456831272/Put/seqid=0 2024-12-06T03:47:15,343 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,343 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:15,343 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741846_1029 2024-12-06T03:47:15,344 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:15,346 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,346 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]) is bad. 
2024-12-06T03:47:15,346 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741847_1030 2024-12-06T03:47:15,346 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK] 2024-12-06T03:47:15,349 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36403 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,349 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35362 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10]'}, localName='127.0.0.1:45127', datanodeUuid='e696cafe-43c2-49d4-a640-d382b189f5dd', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741848_1031 to mirror 127.0.0.1:36403 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,349 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 
2024-12-06T03:47:15,349 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741848_1031 2024-12-06T03:47:15,349 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35362 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-06T03:47:15,349 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35362 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35362 dst: /127.0.0.1:45127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,349 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:15,351 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34521 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:15,351 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35378 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10]'}, localName='127.0.0.1:45127', datanodeUuid='e696cafe-43c2-49d4-a640-d382b189f5dd', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741849_1032 to mirror 127.0.0.1:34521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,352 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:15,352 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741849_1032 2024-12-06T03:47:15,352 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35378 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-06T03:47:15,352 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35378 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35378 dst: /127.0.0.1:45127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,352 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] 2024-12-06T03:47:15,353 WARN [IPC Server handler 4 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T03:47:15,353 WARN [IPC Server handler 4 on default port 46387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T03:47:15,353 WARN [IPC Server handler 4 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T03:47:15,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741850_1033 (size=10347) 2024-12-06T03:47:15,706 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 is not closed yet, will try archiving it next time 2024-12-06T03:47:15,707 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456833264 to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs/6f1b912b0816%2C37979%2C1733456815684.1733456833264 2024-12-06T03:47:15,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/71748fe632834110ae0d9fc75c5ea59d 2024-12-06T03:47:15,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/71748fe632834110ae0d9fc75c5ea59d as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/71748fe632834110ae0d9fc75c5ea59d 2024-12-06T03:47:15,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/71748fe632834110ae0d9fc75c5ea59d, entries=5, sequenceid=11, filesize=10.1 K 2024-12-06T03:47:15,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for be1f405d2ef6b6215b6bdbbe6eeb9550 in 447ms, sequenceid=11, compaction requested=false 2024-12-06T03:47:15,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for be1f405d2ef6b6215b6bdbbe6eeb9550: 2024-12-06T03:47:15,822 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] regionserver.HRegion(8855): Flush requested on be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:47:15,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing be1f405d2ef6b6215b6bdbbe6eeb9550 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-06T03:47:15,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/7448f66feac34475a7e2f5c62b3a8e47 is 1080, key is row0007/info:/1733456835324/Put/seqid=0 2024-12-06T03:47:15,957 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,957 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]) is bad. 2024-12-06T03:47:15,957 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741851_1034 2024-12-06T03:47:15,958 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK] 2024-12-06T03:47:15,959 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,959 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:15,959 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741852_1035 2024-12-06T03:47:15,960 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:15,962 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36403 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35408 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10]'}, localName='127.0.0.1:45127', datanodeUuid='e696cafe-43c2-49d4-a640-d382b189f5dd', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741853_1036 to mirror 127.0.0.1:36403 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,962 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:15,962 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741853_1036 2024-12-06T03:47:15,962 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35408 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-06T03:47:15,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35408 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35408 dst: /127.0.0.1:45127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,963 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:15,965 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35412 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10]'}, localName='127.0.0.1:45127', datanodeUuid='e696cafe-43c2-49d4-a640-d382b189f5dd', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741854_1037 to mirror 127.0.0.1:34521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:15,965 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34521 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:15,965 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:15,965 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35412 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-06T03:47:15,965 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741854_1037 2024-12-06T03:47:15,965 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35412 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35412 dst: /127.0.0.1:45127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:15,966 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] 2024-12-06T03:47:15,967 WARN [IPC Server handler 2 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T03:47:15,967 WARN [IPC Server handler 2 on default port 46387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T03:47:15,967 WARN [IPC Server handler 2 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T03:47:15,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741855_1038 (size=12506) 2024-12-06T03:47:16,208 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@595b0cdd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741840_1023 to 127.0.0.1:46451 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:16,208 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40318e5e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741850_1033 to 127.0.0.1:42543 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:16,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/7448f66feac34475a7e2f5c62b3a8e47 2024-12-06T03:47:16,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/7448f66feac34475a7e2f5c62b3a8e47 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7448f66feac34475a7e2f5c62b3a8e47 2024-12-06T03:47:16,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7448f66feac34475a7e2f5c62b3a8e47, entries=7, sequenceid=24, filesize=12.2 K 2024-12-06T03:47:16,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for be1f405d2ef6b6215b6bdbbe6eeb9550 in 441ms, sequenceid=24, compaction requested=false 2024-12-06T03:47:16,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for be1f405d2ef6b6215b6bdbbe6eeb9550: 2024-12-06T03:47:16,391 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-06T03:47:16,391 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:16,391 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7448f66feac34475a7e2f5c62b3a8e47 because midkey is the same as first or last row 2024-12-06T03:47:17,254 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,305 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,305 WARN [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]] 2024-12-06T03:47:17,305 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C37979%2C1733456815684:(num 1733456835289) roll requested 2024-12-06T03:47:17,305 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37979%2C1733456815684.1733456837305 2024-12-06T03:47:17,308 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,308 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:17,308 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741856_1039 2024-12-06T03:47:17,309 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:17,310 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,311 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]) is bad. 2024-12-06T03:47:17,311 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741857_1040 2024-12-06T03:47:17,312 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK] 2024-12-06T03:47:17,315 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42543 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:17,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35438 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10]'}, localName='127.0.0.1:45127', datanodeUuid='e696cafe-43c2-49d4-a640-d382b189f5dd', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741858_1041 to mirror 127.0.0.1:42543 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:17,315 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:17,315 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741858_1041 2024-12-06T03:47:17,315 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35438 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T03:47:17,315 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35438 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35438 dst: /127.0.0.1:45127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:17,316 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:17,322 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34521 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,322 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35442 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10]'}, localName='127.0.0.1:45127', datanodeUuid='e696cafe-43c2-49d4-a640-d382b189f5dd', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741859_1042 to mirror 127.0.0.1:34521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:17,323 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:17,323 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35442 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T03:47:17,323 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741859_1042 2024-12-06T03:47:17,323 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35442 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35442 dst: /127.0.0.1:45127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:17,324 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] 2024-12-06T03:47:17,325 WARN [IPC Server handler 3 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T03:47:17,325 WARN [IPC Server handler 3 on default port 46387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T03:47:17,325 WARN [IPC Server handler 3 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T03:47:17,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:17,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:17,328 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:17,329 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:17,329 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:17,329 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456835289 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456837305 2024-12-06T03:47:17,330 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46431:46431)] 2024-12-06T03:47:17,330 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 is not closed yet, will try archiving it next time 2024-12-06T03:47:17,330 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456835289 is not closed yet, will try archiving it next time 2024-12-06T03:47:17,330 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456829250 to 
hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs/6f1b912b0816%2C37979%2C1733456815684.1733456829250 2024-12-06T03:47:17,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741845_1028 (size=24823) 2024-12-06T03:47:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] regionserver.HRegion(8855): Flush requested on be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:47:17,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing be1f405d2ef6b6215b6bdbbe6eeb9550 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-06T03:47:17,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/e95d3c58d7c04e16b64a8ea9c135820c is 1079, key is tmprow/info:/1733456837368/Put/seqid=0 2024-12-06T03:47:17,376 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,376 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]) is bad. 2024-12-06T03:47:17,377 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741861_1044 2024-12-06T03:47:17,377 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK] 2024-12-06T03:47:17,379 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,379 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:17,379 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741862_1045 2024-12-06T03:47:17,379 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:17,381 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,381 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:17,381 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741863_1046 2024-12-06T03:47:17,382 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:17,383 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,384 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:17,384 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741864_1047 2024-12-06T03:47:17,384 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] 2024-12-06T03:47:17,385 WARN [IPC Server handler 1 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T03:47:17,385 WARN [IPC Server handler 1 on default port 46387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T03:47:17,385 WARN [IPC Server handler 1 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T03:47:17,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741865_1048 (size=6027) 2024-12-06T03:47:17,732 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 is not closed yet, will try archiving it next time 2024-12-06T03:47:17,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/e95d3c58d7c04e16b64a8ea9c135820c 2024-12-06T03:47:17,800 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/e95d3c58d7c04e16b64a8ea9c135820c as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e95d3c58d7c04e16b64a8ea9c135820c 2024-12-06T03:47:17,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e95d3c58d7c04e16b64a8ea9c135820c, entries=1, sequenceid=34, filesize=5.9 K 2024-12-06T03:47:17,810 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for be1f405d2ef6b6215b6bdbbe6eeb9550 in 441ms, sequenceid=34, compaction requested=true 2024-12-06T03:47:17,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for be1f405d2ef6b6215b6bdbbe6eeb9550: 2024-12-06T03:47:17,811 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-06T03:47:17,811 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:17,811 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7448f66feac34475a7e2f5c62b3a8e47 because midkey is the same as first or last row 2024-12-06T03:47:17,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be1f405d2ef6b6215b6bdbbe6eeb9550:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:47:17,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:47:17,815 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:47:17,817 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:47:17,817 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HStore(1541): be1f405d2ef6b6215b6bdbbe6eeb9550/info is initiating minor compaction (all files) 2024-12-06T03:47:17,818 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of be1f405d2ef6b6215b6bdbbe6eeb9550/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 
2024-12-06T03:47:17,818 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/71748fe632834110ae0d9fc75c5ea59d, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7448f66feac34475a7e2f5c62b3a8e47, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e95d3c58d7c04e16b64a8ea9c135820c] into tmpdir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp, totalSize=28.2 K 2024-12-06T03:47:17,819 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 71748fe632834110ae0d9fc75c5ea59d, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733456831272 2024-12-06T03:47:17,820 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7448f66feac34475a7e2f5c62b3a8e47, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733456835324 2024-12-06T03:47:17,820 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.Compactor(225): Compacting e95d3c58d7c04e16b64a8ea9c135820c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733456837368 2024-12-06T03:47:17,822 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,843 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be1f405d2ef6b6215b6bdbbe6eeb9550#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:47:17,844 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/7773614e73274fa7a0e655c288709d4c is 1080, key is row0002/info:/1733456831272/Put/seqid=0 2024-12-06T03:47:17,846 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,847 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:17,847 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741866_1049 2024-12-06T03:47:17,847 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:17,849 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,849 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]) is bad. 
2024-12-06T03:47:17,850 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741867_1050 2024-12-06T03:47:17,850 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK] 2024-12-06T03:47:17,852 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:17,852 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:17,852 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741868_1051 2024-12-06T03:47:17,853 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] 2024-12-06T03:47:17,855 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:17,855 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:17,855 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741869_1052 2024-12-06T03:47:17,856 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:17,857 WARN [IPC Server handler 2 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T03:47:17,857 WARN [IPC Server handler 2 on default port 46387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T03:47:17,857 WARN [IPC Server handler 2 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T03:47:17,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741870_1053 (size=17994) 2024-12-06T03:47:17,877 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/7773614e73274fa7a0e655c288709d4c as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c 2024-12-06T03:47:17,892 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in be1f405d2ef6b6215b6bdbbe6eeb9550/info of be1f405d2ef6b6215b6bdbbe6eeb9550 into 7773614e73274fa7a0e655c288709d4c(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T03:47:17,892 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for be1f405d2ef6b6215b6bdbbe6eeb9550: 2024-12-06T03:47:17,892 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550., storeName=be1f405d2ef6b6215b6bdbbe6eeb9550/info, priority=13, startTime=1733456837811; duration=0sec 2024-12-06T03:47:17,892 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-06T03:47:17,892 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c because midkey is the same as first or last row 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c because midkey is the same as first or last row 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c because midkey is the same as first or last row 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:47:17,893 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be1f405d2ef6b6215b6bdbbe6eeb9550:info 2024-12-06T03:47:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] regionserver.HRegion(8855): Flush requested on be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:47:18,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing be1f405d2ef6b6215b6bdbbe6eeb9550 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-06T03:47:18,801 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/9c22af9e2acc4d7d8002b7d568197dfd is 1079, key is tmprow/info:/1733456838795/Put/seqid=0 2024-12-06T03:47:18,803 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:18,804 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:18,804 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741871_1054 2024-12-06T03:47:18,804 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:18,807 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42543 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:18,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35500 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10]'}, localName='127.0.0.1:45127', datanodeUuid='e696cafe-43c2-49d4-a640-d382b189f5dd', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741872_1055 to mirror 127.0.0.1:42543 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:18,807 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK], DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:18,807 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741872_1055 2024-12-06T03:47:18,807 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35500 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-06T03:47:18,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:35500 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:45127:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35500 dst: /127.0.0.1:45127 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:18,808 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:18,809 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:18,810 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]) is bad. 2024-12-06T03:47:18,810 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741873_1056 2024-12-06T03:47:18,810 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34521,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK] 2024-12-06T03:47:18,812 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:18,812 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]) is bad. 2024-12-06T03:47:18,812 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741874_1057 2024-12-06T03:47:18,813 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK] 2024-12-06T03:47:18,814 WARN [IPC Server handler 3 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T03:47:18,814 WARN [IPC Server handler 3 on default port 46387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T03:47:18,814 WARN [IPC Server handler 3 on default port 46387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T03:47:18,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741875_1058 (size=6027) 2024-12-06T03:47:18,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/9c22af9e2acc4d7d8002b7d568197dfd 2024-12-06T03:47:18,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/9c22af9e2acc4d7d8002b7d568197dfd as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/9c22af9e2acc4d7d8002b7d568197dfd 2024-12-06T03:47:18,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/9c22af9e2acc4d7d8002b7d568197dfd, entries=1, sequenceid=45, filesize=5.9 K 2024-12-06T03:47:18,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for be1f405d2ef6b6215b6bdbbe6eeb9550 in 45ms, sequenceid=45, compaction requested=false 2024-12-06T03:47:18,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for be1f405d2ef6b6215b6bdbbe6eeb9550: 2024-12-06T03:47:18,841 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-06T03:47:18,841 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:18,841 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c because midkey is the same as first or last row 2024-12-06T03:47:19,199 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@595b0cdd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741855_1038 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:19,199 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40318e5e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741845_1028 to 127.0.0.1:34521 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:19,255 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:19,330 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:19,330 WARN [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-06T03:47:19,425 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:19,434 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:19,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:19,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:19,439 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:47:19,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c767c56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:19,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@721d4444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:19,568 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74e983d1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/java.io.tmpdir/jetty-localhost-45219-hadoop-hdfs-3_4_1-tests_jar-_-any-15261088817936732711/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:19,569 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@48ed1e93{HTTP/1.1, (http/1.1)}{localhost:45219} 2024-12-06T03:47:19,569 INFO [Time-limited test {}] server.Server(415): Started @132434ms 2024-12-06T03:47:19,570 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:47:19,822 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:19,988 WARN [Thread-973 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:47:20,012 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f530d5a5c23dee2 with lease ID 0x32bd57be33037070: from storage DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa node DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T03:47:20,013 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f530d5a5c23dee2 with lease ID 0x32bd57be33037070: from storage DS-724c81f0-9d70-4baf-b0ff-a86ee1ce1b4c node DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:20,200 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@595b0cdd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741865_1048 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:20,200 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40318e5e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741870_1053 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:21,255 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:21,331 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:21,823 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:22,200 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40318e5e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45127, datanodeUuid=e696cafe-43c2-49d4-a640-d382b189f5dd, infoPort=46431, infoSecurePort=0, ipcPort=46765, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741875_1058 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:23,256 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:23,331 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:23,823 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:25,256 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:25,331 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:25,510 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T03:47:25,824 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:26,063 ERROR [FSHLog-0-hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData-prefix:6f1b912b0816,44365,1733456815530 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:26,064 WARN [FSHLog-0-hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData-prefix:6f1b912b0816,44365,1733456815530 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:26,064 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C44365%2C1733456815530:(num 1733456815845) roll requested 2024-12-06T03:47:26,065 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C44365%2C1733456815530.1733456846065 2024-12-06T03:47:26,071 WARN [Thread-993 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:26,071 WARN [Thread-993 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK]) is bad. 2024-12-06T03:47:26,071 WARN [Thread-993 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741876_1059 2024-12-06T03:47:26,072 WARN [Thread-993 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42543,DS-c3977df0-8f2c-4e6d-b5bc-e58a8f39bf6c,DISK] 2024-12-06T03:47:26,074 WARN [Thread-993 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:26,074 WARN [Thread-993 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:26,075 WARN [Thread-993 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741877_1060 2024-12-06T03:47:26,075 WARN [Thread-993 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:26,077 WARN [Thread-993 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:26,077 WARN [Thread-993 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]) is bad. 
2024-12-06T03:47:26,077 WARN [Thread-993 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741878_1061 2024-12-06T03:47:26,078 WARN [Thread-993 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK] 2024-12-06T03:47:26,083 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:26,083 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:26,083 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:26,083 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:26,083 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:26,084 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456846065 2024-12-06T03:47:26,084 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:26,084 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
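Editor's note: the records that follow show the Close-WAL-Writer thread recovering the HDFS lease on the old WAL file through RecoverLeaseFSUtils, with retries spaced out over time (attempt=0 after 1ms, attempt=1 after 4002ms) while the NameNode reports "Lease recovery is in progress". Below is a minimal, hypothetical sketch of the same retry idea using the public DistributedFileSystem.recoverLease API; it is not HBase's actual RecoverLeaseFSUtils code, and all names and parameters are illustrative.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryExample {
        // Hypothetical sketch: retry lease recovery on a WAL file until HDFS
        // reports the file closed, with a simple growing pause between attempts.
        public static boolean recoverLeaseWithRetries(URI nameNode, String walPath,
                                                      int maxAttempts, long pauseMs) throws Exception {
            Configuration conf = new Configuration();
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(nameNode, conf);
            Path file = new Path(walPath);
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                // recoverLease returns true once the lease is released and the file is closed.
                if (dfs.recoverLease(file)) {
                    return true;
                }
                Thread.sleep(pauseMs * (attempt + 1)); // back off a little more each attempt
            }
            return false;
        }
    }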
2024-12-06T03:47:26,084 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 2024-12-06T03:47:26,084 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46431:46431),(127.0.0.1/127.0.0.1:37639:37639)] 2024-12-06T03:47:26,084 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 is not closed yet, will try archiving it next time 2024-12-06T03:47:26,085 WARN [IPC Server handler 0 on default port 46387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-12-06T03:47:26,085 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 after 1ms 2024-12-06T03:47:27,257 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:27,332 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:29,257 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:29,332 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:30,036 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@72ed8f7b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:46451,null,null]) java.net.ConnectException: Call From 6f1b912b0816/172.17.0.2 to localhost:41371 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-06T03:47:30,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741832_1019 (size=455) 2024-12-06T03:47:30,086 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/WALs/6f1b912b0816,44365,1733456815530/6f1b912b0816%2C44365%2C1733456815530.1733456815845 after 4002ms 2024-12-06T03:47:30,291 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456816343 to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs/6f1b912b0816%2C37979%2C1733456815684.1733456816343 2024-12-06T03:47:30,294 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456835289 to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs/6f1b912b0816%2C37979%2C1733456815684.1733456835289 2024-12-06T03:47:30,997 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4355ff0e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741835_1011 to 127.0.0.1:42543 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:30,997 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ccaa081[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741833_1009 to 127.0.0.1:42543 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:31,258 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:31,333 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:31,996 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ccaa081[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741831_1007 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:31,997 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4355ff0e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741829_1005 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:33,259 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:33,334 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:33,997 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4355ff0e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741826_1002 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:33,997 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ccaa081[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741832_1019 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:35,025 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37979%2C1733456815684.1733456855025 2024-12-06T03:47:35,032 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,032 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,032 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,032 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,033 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,033 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456837305 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456855025 2024-12-06T03:47:35,034 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37639:37639),(127.0.0.1/127.0.0.1:46431:46431)] 2024-12-06T03:47:35,034 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456837305 is not closed yet, will try archiving it next time 2024-12-06T03:47:35,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741860_1043 (size=13591) 2024-12-06T03:47:35,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] regionserver.HRegion(8855): Flush requested on be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:47:35,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing be1f405d2ef6b6215b6bdbbe6eeb9550 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-06T03:47:35,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/275bdac65df346d9a0dbe3f70f49ddff is 1080, key is row0013/info:/1733456855035/Put/seqid=0 2024-12-06T03:47:35,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741881_1065 (size=11421) 2024-12-06T03:47:35,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741881_1065 (size=11421) 2024-12-06T03:47:35,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/275bdac65df346d9a0dbe3f70f49ddff 2024-12-06T03:47:35,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/275bdac65df346d9a0dbe3f70f49ddff as 
hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/275bdac65df346d9a0dbe3f70f49ddff 2024-12-06T03:47:35,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/275bdac65df346d9a0dbe3f70f49ddff, entries=6, sequenceid=55, filesize=11.2 K 2024-12-06T03:47:35,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for be1f405d2ef6b6215b6bdbbe6eeb9550 in 28ms, sequenceid=55, compaction requested=true 2024-12-06T03:47:35,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for be1f405d2ef6b6215b6bdbbe6eeb9550: 2024-12-06T03:47:35,073 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-06T03:47:35,073 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:35,073 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c because midkey is the same as first or last row 2024-12-06T03:47:35,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be1f405d2ef6b6215b6bdbbe6eeb9550:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:47:35,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:47:35,073 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:47:35,074 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:47:35,074 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HStore(1541): be1f405d2ef6b6215b6bdbbe6eeb9550/info is initiating minor compaction (all files) 2024-12-06T03:47:35,074 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of be1f405d2ef6b6215b6bdbbe6eeb9550/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 
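Editor's note: the flush and split-policy records above report the store as "big enough" (sumSize=34.6 K against sizeToCheck=16.0 K) yet refuse to split because the midkey equals the first or last row, after which a minor compaction of the three store files is queued. The snippet below is a deliberately simplified sketch of that size-plus-midkey decision, written for illustration only; it is not the actual ConstantSizeRegionSplitPolicy or StoreUtils implementation.

    public class SplitCheckSketch {
        // Simplified restatement of the check logged above; the real policy
        // classes apply additional conditions per store and per region.
        static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes, boolean midkeyUsable) {
            boolean bigEnough = storeSizeBytes > sizeToCheckBytes; // e.g. 34.6 K > 16.0 K in this log
            // Even when big enough, the split is skipped if the midkey is the same
            // as the first or last row, since one daughter region would be empty.
            return bigEnough && midkeyUsable;
        }
    }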
2024-12-06T03:47:35,075 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/9c22af9e2acc4d7d8002b7d568197dfd, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/275bdac65df346d9a0dbe3f70f49ddff] into tmpdir=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp, totalSize=34.6 K 2024-12-06T03:47:35,075 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7773614e73274fa7a0e655c288709d4c, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733456831272 2024-12-06T03:47:35,076 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9c22af9e2acc4d7d8002b7d568197dfd, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733456838795 2024-12-06T03:47:35,076 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 275bdac65df346d9a0dbe3f70f49ddff, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733456839201 2024-12-06T03:47:35,093 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be1f405d2ef6b6215b6bdbbe6eeb9550#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:47:35,094 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/e52a8b74764b413983ca6abb3dd53170 is 1080, key is row0002/info:/1733456831272/Put/seqid=0 2024-12-06T03:47:35,095 WARN [Thread-1019 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:35,096 WARN [Thread-1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:35,096 WARN [Thread-1019 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741882_1066 2024-12-06T03:47:35,096 WARN [Thread-1019 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741883_1067 (size=23502) 2024-12-06T03:47:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741883_1067 (size=23502) 2024-12-06T03:47:35,108 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/e52a8b74764b413983ca6abb3dd53170 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e52a8b74764b413983ca6abb3dd53170 2024-12-06T03:47:35,118 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in be1f405d2ef6b6215b6bdbbe6eeb9550/info of be1f405d2ef6b6215b6bdbbe6eeb9550 into e52a8b74764b413983ca6abb3dd53170(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for be1f405d2ef6b6215b6bdbbe6eeb9550: 2024-12-06T03:47:35,118 INFO [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550., storeName=be1f405d2ef6b6215b6bdbbe6eeb9550/info, priority=13, startTime=1733456855073; duration=0sec 2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e52a8b74764b413983ca6abb3dd53170 because midkey is the same as first or last row 2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e52a8b74764b413983ca6abb3dd53170 because midkey is the same as first or last row 2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-06T03:47:35,118 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:35,119 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e52a8b74764b413983ca6abb3dd53170 because midkey is the same as first or last row 2024-12-06T03:47:35,119 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:47:35,119 DEBUG [RS:0;6f1b912b0816:37979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be1f405d2ef6b6215b6bdbbe6eeb9550:info 2024-12-06T03:47:35,259 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] regionserver.HRegion(8855): Flush requested on be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:47:35,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing be1f405d2ef6b6215b6bdbbe6eeb9550 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-06T03:47:35,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/7bb1e80d4be14f8d8442ef704183bbe0 is 1080, key is row0018/info:/1733456855045/Put/seqid=0 2024-12-06T03:47:35,269 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,269 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 
2024-12-06T03:47:35,269 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741884_1068 2024-12-06T03:47:35,270 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:35,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741885_1069 (size=11421) 2024-12-06T03:47:35,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741885_1069 (size=11421) 2024-12-06T03:47:35,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/7bb1e80d4be14f8d8442ef704183bbe0 2024-12-06T03:47:35,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/.tmp/info/7bb1e80d4be14f8d8442ef704183bbe0 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7bb1e80d4be14f8d8442ef704183bbe0 2024-12-06T03:47:35,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7bb1e80d4be14f8d8442ef704183bbe0, entries=6, sequenceid=66, filesize=11.2 K 2024-12-06T03:47:35,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for be1f405d2ef6b6215b6bdbbe6eeb9550 in 31ms, sequenceid=66, compaction requested=false 2024-12-06T03:47:35,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for be1f405d2ef6b6215b6bdbbe6eeb9550: 2024-12-06T03:47:35,292 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-12-06T03:47:35,292 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:47:35,292 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e52a8b74764b413983ca6abb3dd53170 because midkey is the same as first or last row 2024-12-06T03:47:35,334 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,334 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-06T03:47:35,437 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.1733456837305 to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs/6f1b912b0816%2C37979%2C1733456815684.1733456837305 2024-12-06T03:47:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T03:47:35,462 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T03:47:35,463 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:47:35,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:47:35,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:47:35,463 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-06T03:47:35,464 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T03:47:35,464 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2090765269, stopped=false 2024-12-06T03:47:35,464 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6f1b912b0816,44365,1733456815530 2024-12-06T03:47:35,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:47:35,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:47:35,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:47:35,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:35,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:35,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:35,532 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:47:35,533 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-06T03:47:35,533 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:47:35,534 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:47:35,534 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:47:35,534 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:47:35,534 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6f1b912b0816,37979,1733456815684' ***** 2024-12-06T03:47:35,535 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T03:47:35,535 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:47:35,535 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6f1b912b0816,37887,1733456817153' ***** 2024-12-06T03:47:35,535 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T03:47:35,536 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T03:47:35,536 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T03:47:35,536 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T03:47:35,536 INFO [RS:0;6f1b912b0816:37979 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T03:47:35,536 INFO [RS:1;6f1b912b0816:37887 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T03:47:35,536 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T03:47:35,536 INFO [RS:0;6f1b912b0816:37979 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T03:47:35,536 INFO [RS:1;6f1b912b0816:37887 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T03:47:35,536 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(959): stopping server 6f1b912b0816,37887,1733456817153 2024-12-06T03:47:35,536 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(3091): Received CLOSE for be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:47:35,537 INFO [RS:1;6f1b912b0816:37887 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:47:35,537 INFO [RS:1;6f1b912b0816:37887 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;6f1b912b0816:37887. 
2024-12-06T03:47:35,537 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(959): stopping server 6f1b912b0816,37979,1733456815684 2024-12-06T03:47:35,537 DEBUG [RS:1;6f1b912b0816:37887 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:47:35,537 INFO [RS:0;6f1b912b0816:37979 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:47:35,537 DEBUG [RS:1;6f1b912b0816:37887 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:47:35,537 INFO [RS:0;6f1b912b0816:37979 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6f1b912b0816:37979. 2024-12-06T03:47:35,537 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(976): stopping server 6f1b912b0816,37887,1733456817153; all regions closed. 
2024-12-06T03:47:35,537 DEBUG [RS:0;6f1b912b0816:37979 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:47:35,537 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing be1f405d2ef6b6215b6bdbbe6eeb9550, disabling compactions & flushes 2024-12-06T03:47:35,537 DEBUG [RS:0;6f1b912b0816:37979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:47:35,537 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:47:35,537 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T03:47:35,537 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:47:35,537 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T03:47:35,538 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T03:47:35,538 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. after waiting 0 ms 2024-12-06T03:47:35,538 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 
2024-12-06T03:47:35,538 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T03:47:35,538 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,538 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,538 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,538 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,538 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,538 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-06T03:47:35,538 DEBUG [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1325): Online Regions={be1f405d2ef6b6215b6bdbbe6eeb9550=TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550., 1588230740=hbase:meta,,1.1588230740} 2024-12-06T03:47:35,538 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:47:35,538 DEBUG [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, be1f405d2ef6b6215b6bdbbe6eeb9550 2024-12-06T03:47:35,538 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:47:35,539 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:47:35,538 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/71748fe632834110ae0d9fc75c5ea59d, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7448f66feac34475a7e2f5c62b3a8e47, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e95d3c58d7c04e16b64a8ea9c135820c, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/9c22af9e2acc4d7d8002b7d568197dfd, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/275bdac65df346d9a0dbe3f70f49ddff] to archive 2024-12-06T03:47:35,539 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:47:35,539 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:47:35,539 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-06T03:47:35,539 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,539 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,539 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 2024-12-06T03:47:35,539 ERROR [FSHLog-0-hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726-prefix:6f1b912b0816,37979,1733456815684.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,539 WARN [FSHLog-0-hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726-prefix:6f1b912b0816,37979,1733456815684.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,540 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C37979%2C1733456815684.meta:.meta(num 1733456816885) roll requested 2024-12-06T03:47:35,540 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C37979%2C1733456815684.meta.1733456855540.meta 2024-12-06T03:47:35,540 WARN [IPC Server handler 3 on default port 46387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 has not been closed. Lease recovery is in progress. RecoveryId = 1070 for block blk_1073741837_1013 2024-12-06T03:47:35,540 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T03:47:35,540 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 after 1ms 2024-12-06T03:47:35,542 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/71748fe632834110ae0d9fc75c5ea59d to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/71748fe632834110ae0d9fc75c5ea59d 2024-12-06T03:47:35,543 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36403 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:35,543 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:56480 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741886_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data4]'}, localName='127.0.0.1:42037', datanodeUuid='b55de3f7-2a18-4d64-98a6-55c777f5975c', xmitsInProgress=0}:Exception transferring block BP-1883769982-172.17.0.2-1733456813785:blk_1073741886_1071 to mirror 127.0.0.1:36403 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:35,544 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741886_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42037,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK], DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:35,544 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741886_1071 2024-12-06T03:47:35,544 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:56480 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741886_1071] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-06T03:47:35,544 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7448f66feac34475a7e2f5c62b3a8e47 to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7448f66feac34475a7e2f5c62b3a8e47 2024-12-06T03:47:35,544 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1530256290_22 at /127.0.0.1:56480 [Receiving block BP-1883769982-172.17.0.2-1733456813785:blk_1073741886_1071] {}] datanode.DataXceiver(331): 127.0.0.1:42037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56480 dst: /127.0.0.1:42037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:35,544 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:35,545 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/7773614e73274fa7a0e655c288709d4c 2024-12-06T03:47:35,547 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e95d3c58d7c04e16b64a8ea9c135820c to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/e95d3c58d7c04e16b64a8ea9c135820c 2024-12-06T03:47:35,548 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,548 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,548 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,548 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,548 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/9c22af9e2acc4d7d8002b7d568197dfd to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/9c22af9e2acc4d7d8002b7d568197dfd 2024-12-06T03:47:35,548 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,549 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456855540.meta 2024-12-06T03:47:35,549 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,549 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46451,DS-09efbea7-e9e5-4a00-ac16-201242145f3f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,549 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta 2024-12-06T03:47:35,549 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46431:46431),(127.0.0.1/127.0.0.1:37639:37639)] 2024-12-06T03:47:35,549 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta is not closed yet, will try archiving it next time 2024-12-06T03:47:35,550 WARN [IPC Server handler 3 on default port 46387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1073 for block blk_1073741834_1010 2024-12-06T03:47:35,550 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta after 1ms 2024-12-06T03:47:35,550 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/275bdac65df346d9a0dbe3f70f49ddff to hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/info/275bdac65df346d9a0dbe3f70f49ddff 2024-12-06T03:47:35,550 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6f1b912b0816:44365 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-06T03:47:35,551 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [71748fe632834110ae0d9fc75c5ea59d=10347, 7448f66feac34475a7e2f5c62b3a8e47=12506, 7773614e73274fa7a0e655c288709d4c=17994, e95d3c58d7c04e16b64a8ea9c135820c=6027, 9c22af9e2acc4d7d8002b7d568197dfd=6027, 275bdac65df346d9a0dbe3f70f49ddff=11421] 2024-12-06T03:47:35,554 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/default/TestLogRolling-testLogRollOnDatanodeDeath/be1f405d2ef6b6215b6bdbbe6eeb9550/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-12-06T03:47:35,555 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:47:35,555 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for be1f405d2ef6b6215b6bdbbe6eeb9550: Waiting for close lock at 1733456855537Running coprocessor pre-close hooks at 1733456855537Disabling compacts and flushes for region at 1733456855537Disabling writes for close at 1733456855538 (+1 ms)Writing region close event to WAL at 1733456855551 (+13 ms)Running coprocessor post-close hooks at 1733456855555 (+4 ms)Closed at 1733456855555 2024-12-06T03:47:35,556 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550. 2024-12-06T03:47:35,564 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/info/718e744f4d6846d0ae903e38511e0d05 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733456817286.be1f405d2ef6b6215b6bdbbe6eeb9550./info:regioninfo/1733456817658/Put/seqid=0 2024-12-06T03:47:35,566 WARN [Thread-1038 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:35,566 WARN [Thread-1038 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741888_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:42037,DS-7fe5a571-6237-4a8e-a91a-2c8ecc4766aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:35,566 WARN [Thread-1038 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741888_1074 2024-12-06T03:47:35,567 WARN [Thread-1038 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:35,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741889_1075 (size=7089) 2024-12-06T03:47:35,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741889_1075 (size=7089) 2024-12-06T03:47:35,571 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/info/718e744f4d6846d0ae903e38511e0d05 2024-12-06T03:47:35,597 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/ns/bc0b8c15325f4b32afd408b13377f0b7 is 43, key is default/ns:d/1733456817004/Put/seqid=0 2024-12-06T03:47:35,599 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:35,599 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741890_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 
2024-12-06T03:47:35,599 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741890_1076 2024-12-06T03:47:35,600 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:35,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741891_1077 (size=5153) 2024-12-06T03:47:35,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741891_1077 (size=5153) 2024-12-06T03:47:35,604 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/ns/bc0b8c15325f4b32afd408b13377f0b7 2024-12-06T03:47:35,622 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/table/54b562addfa44f7aa12dc36b1b2b3540 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733456817670/Put/seqid=0 2024-12-06T03:47:35,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741892_1078 (size=5424) 2024-12-06T03:47:35,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741892_1078 (size=5424) 2024-12-06T03:47:35,628 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/table/54b562addfa44f7aa12dc36b1b2b3540 2024-12-06T03:47:35,634 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/info/718e744f4d6846d0ae903e38511e0d05 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/info/718e744f4d6846d0ae903e38511e0d05 2024-12-06T03:47:35,640 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/info/718e744f4d6846d0ae903e38511e0d05, entries=10, sequenceid=11, filesize=6.9 K 2024-12-06T03:47:35,641 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/ns/bc0b8c15325f4b32afd408b13377f0b7 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/ns/bc0b8c15325f4b32afd408b13377f0b7 2024-12-06T03:47:35,646 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/ns/bc0b8c15325f4b32afd408b13377f0b7, entries=2, sequenceid=11, filesize=5.0 K 2024-12-06T03:47:35,647 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/.tmp/table/54b562addfa44f7aa12dc36b1b2b3540 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/table/54b562addfa44f7aa12dc36b1b2b3540 2024-12-06T03:47:35,652 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/table/54b562addfa44f7aa12dc36b1b2b3540, entries=2, sequenceid=11, filesize=5.3 K 2024-12-06T03:47:35,654 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=11, compaction requested=false 2024-12-06T03:47:35,658 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-06T03:47:35,659 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:47:35,659 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:47:35,659 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456855538Running coprocessor pre-close hooks at 1733456855538Disabling compacts and flushes for region at 1733456855538Disabling writes for close at 1733456855539 (+1 ms)Obtaining lock to block concurrent updates at 1733456855539Preparing flush snapshotting stores in 1588230740 at 1733456855539Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733456855539Flushing stores of hbase:meta,,1.1588230740 at 1733456855550 (+11 ms)Flushing 1588230740/info: creating writer at 1733456855550Flushing 1588230740/info: appending metadata at 1733456855564 (+14 ms)Flushing 1588230740/info: closing flushed file at 1733456855564Flushing 1588230740/ns: creating writer at 1733456855578 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733456855597 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733456855597Flushing 1588230740/table: creating writer at 1733456855609 (+12 ms)Flushing 1588230740/table: appending metadata at 1733456855622 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733456855622Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66491377: reopening flushed file at 1733456855633 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@81d8a48: 
reopening flushed file at 1733456855640 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72e1a5a5: reopening flushed file at 1733456855647 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 114ms, sequenceid=11, compaction requested=false at 1733456855654 (+7 ms)Writing region close event to WAL at 1733456855655 (+1 ms)Running coprocessor post-close hooks at 1733456855659 (+4 ms)Closed at 1733456855659 2024-12-06T03:47:35,659 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T03:47:35,739 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(976): stopping server 6f1b912b0816,37979,1733456815684; all regions closed. 2024-12-06T03:47:35,739 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,739 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,739 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,740 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,740 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:35,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741887_1072 (size=825) 2024-12-06T03:47:35,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741887_1072 (size=825) 2024-12-06T03:47:36,206 INFO [regionserver/6f1b912b0816:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:47:36,231 INFO [regionserver/6f1b912b0816:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T03:47:36,231 INFO [regionserver/6f1b912b0816:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T03:47:36,255 INFO [regionserver/6f1b912b0816:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T03:47:36,255 INFO [regionserver/6f1b912b0816:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T03:47:36,999 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4355ff0e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741825_1001 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:37,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:47:37,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741860_1043 (size=13591) 2024-12-06T03:47:37,255 INFO [regionserver/6f1b912b0816:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:47:37,667 INFO [master/6f1b912b0816:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T03:47:37,667 INFO [master/6f1b912b0816:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-06T03:47:37,999 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4355ff0e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42037, datanodeUuid=b55de3f7-2a18-4d64-98a6-55c777f5975c, infoPort=37639, infoSecurePort=0, ipcPort=37805, storageInfo=lv=-57;cid=testClusterID;nsid=524097022;c=1733456813785):Failed to transfer BP-1883769982-172.17.0.2-1733456813785:blk_1073741828_1004 to 127.0.0.1:36403 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:38,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741836_1012 (size=76) 2024-12-06T03:47:39,543 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 after 4003ms 2024-12-06T03:47:39,552 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta after 4003ms 2024-12-06T03:47:40,043 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@74988e58 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1883769982-172.17.0.2-1733456813785:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:46451,null,null]) java.net.ConnectException: Call From 6f1b912b0816/172.17.0.2 to localhost:41371 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-06T03:47:40,539 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-06T03:47:40,545 DEBUG [RS:1;6f1b912b0816:37887 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs 2024-12-06T03:47:40,545 INFO [RS:1;6f1b912b0816:37887 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C37887%2C1733456817153:(num 1733456817388) 2024-12-06T03:47:40,545 DEBUG [RS:1;6f1b912b0816:37887 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:47:40,546 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:47:40,546 INFO [RS:1;6f1b912b0816:37887 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:47:40,547 INFO [RS:1;6f1b912b0816:37887 {}] hbase.ChoreService(370): Chore service for: regionserver/6f1b912b0816:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T03:47:40,547 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T03:47:40,547 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T03:47:40,547 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:47:40,547 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T03:47:40,547 INFO [RS:1;6f1b912b0816:37887 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:47:40,548 INFO [RS:1;6f1b912b0816:37887 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37887 2024-12-06T03:47:40,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:40,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:40,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:40,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:40,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:40,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:40,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:40,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:40,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:47:40,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6f1b912b0816,37887,1733456817153 2024-12-06T03:47:40,613 INFO [RS:1;6f1b912b0816:37887 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:47:40,614 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6f1b912b0816,37887,1733456817153] 2024-12-06T03:47:40,629 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6f1b912b0816,37887,1733456817153 already deleted, retry=false 2024-12-06T03:47:40,629 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6f1b912b0816,37887,1733456817153 expired; onlineServers=1 2024-12-06T03:47:40,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:47:40,721 INFO [RS:1;6f1b912b0816:37887 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:47:40,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37887-0x101aa09785e0002, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:47:40,721 INFO [RS:1;6f1b912b0816:37887 {}] regionserver.HRegionServer(1031): Exiting; stopping=6f1b912b0816,37887,1733456817153; zookeeper connection closed. 
2024-12-06T03:47:40,722 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@562e5c07 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@562e5c07 2024-12-06T03:47:40,740 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-06T03:47:40,747 DEBUG [RS:0;6f1b912b0816:37979 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs 2024-12-06T03:47:40,748 INFO [RS:0;6f1b912b0816:37979 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C37979%2C1733456815684.meta:.meta(num 1733456855540) 2024-12-06T03:47:40,748 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,748 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,749 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,749 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,749 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741880_1064 (size=16308) 2024-12-06T03:47:40,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741880_1064 (size=16308) 2024-12-06T03:47:40,754 DEBUG [RS:0;6f1b912b0816:37979 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/oldWALs 2024-12-06T03:47:40,754 INFO [RS:0;6f1b912b0816:37979 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C37979%2C1733456815684:(num 1733456855025) 2024-12-06T03:47:40,754 DEBUG [RS:0;6f1b912b0816:37979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:47:40,754 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:47:40,754 INFO [RS:0;6f1b912b0816:37979 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:47:40,755 INFO [RS:0;6f1b912b0816:37979 {}] hbase.ChoreService(370): Chore service for: regionserver/6f1b912b0816:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T03:47:40,755 INFO [RS:0;6f1b912b0816:37979 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:47:40,755 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
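[Editor's note] The WAL-Shutdown-0 ERROR records above report that the close of the async writer did not finish within 5 seconds and point at the config key "hbase.wal.fshlog.wait.on.shutdown.seconds". A minimal, hypothetical sketch of raising that wait through the Hadoop Configuration API follows; the key name comes from the log message itself, while the class name and the value 30 are illustrative assumptions and not part of this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    // Build an HBase configuration; in a test like the one above this would be
    // the configuration handed to the mini cluster before it starts.
    Configuration conf = HBaseConfiguration.create();

    // The ERROR above waited 5 seconds for the async writer to close and
    // suggests raising this key; 30 is an illustrative value only.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);

    System.out.println(conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
  }
}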
2024-12-06T03:47:40,755 INFO [RS:0;6f1b912b0816:37979 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37979 2024-12-06T03:47:40,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6f1b912b0816,37979,1733456815684 2024-12-06T03:47:40,786 INFO [RS:0;6f1b912b0816:37979 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:47:40,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:47:40,796 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6f1b912b0816,37979,1733456815684] 2024-12-06T03:47:40,804 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6f1b912b0816,37979,1733456815684 already deleted, retry=false 2024-12-06T03:47:40,804 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6f1b912b0816,37979,1733456815684 expired; onlineServers=0 2024-12-06T03:47:40,804 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6f1b912b0816,44365,1733456815530' ***** 2024-12-06T03:47:40,804 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T03:47:40,804 INFO [M:0;6f1b912b0816:44365 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:47:40,804 INFO [M:0;6f1b912b0816:44365 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:47:40,804 DEBUG [M:0;6f1b912b0816:44365 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T03:47:40,804 DEBUG [M:0;6f1b912b0816:44365 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T03:47:40,804 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T03:47:40,804 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456816063 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456816063,5,FailOnTimeoutGroup] 2024-12-06T03:47:40,804 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456816063 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456816063,5,FailOnTimeoutGroup] 2024-12-06T03:47:40,805 INFO [M:0;6f1b912b0816:44365 {}] hbase.ChoreService(370): Chore service for: master/6f1b912b0816:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T03:47:40,805 INFO [M:0;6f1b912b0816:44365 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:47:40,805 DEBUG [M:0;6f1b912b0816:44365 {}] master.HMaster(1795): Stopping service threads 2024-12-06T03:47:40,805 INFO [M:0;6f1b912b0816:44365 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T03:47:40,805 INFO [M:0;6f1b912b0816:44365 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:47:40,805 INFO [M:0;6f1b912b0816:44365 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T03:47:40,805 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T03:47:40,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T03:47:40,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:40,813 DEBUG [M:0;6f1b912b0816:44365 {}] zookeeper.ZKUtil(347): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T03:47:40,813 WARN [M:0;6f1b912b0816:44365 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T03:47:40,813 INFO [M:0;6f1b912b0816:44365 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/.lastflushedseqids 2024-12-06T03:47:40,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741893_1079 (size=130) 2024-12-06T03:47:40,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741893_1079 (size=130) 2024-12-06T03:47:40,819 INFO [M:0;6f1b912b0816:44365 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T03:47:40,819 INFO [M:0;6f1b912b0816:44365 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T03:47:40,820 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:47:40,820 INFO [M:0;6f1b912b0816:44365 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:47:40,820 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:47:40,820 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:47:40,820 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:47:40,820 INFO [M:0;6f1b912b0816:44365 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-12-06T03:47:40,836 DEBUG [M:0;6f1b912b0816:44365 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4db7ed422ae04c5cbc929ebb217dafd8 is 82, key is hbase:meta,,1/info:regioninfo/1733456816940/Put/seqid=0 2024-12-06T03:47:40,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741894_1080 (size=5672) 2024-12-06T03:47:40,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741894_1080 (size=5672) 2024-12-06T03:47:40,841 INFO [M:0;6f1b912b0816:44365 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4db7ed422ae04c5cbc929ebb217dafd8 2024-12-06T03:47:40,859 DEBUG [M:0;6f1b912b0816:44365 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb25451c856f4661be5fc41b84086da7 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733456817677/Put/seqid=0 2024-12-06T03:47:40,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741895_1081 (size=6254) 2024-12-06T03:47:40,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741895_1081 (size=6254) 2024-12-06T03:47:40,865 INFO [M:0;6f1b912b0816:44365 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb25451c856f4661be5fc41b84086da7 2024-12-06T03:47:40,870 INFO [M:0;6f1b912b0816:44365 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bb25451c856f4661be5fc41b84086da7 2024-12-06T03:47:40,883 DEBUG [M:0;6f1b912b0816:44365 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ff007e4538a412dbdd06993a99a20f1 is 69, key is 6f1b912b0816,37887,1733456817153/rs:state/1733456817234/Put/seqid=0 2024-12-06T03:47:40,887 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741896_1082 (size=5224) 2024-12-06T03:47:40,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741896_1082 (size=5224) 2024-12-06T03:47:40,888 INFO [M:0;6f1b912b0816:44365 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ff007e4538a412dbdd06993a99a20f1 2024-12-06T03:47:40,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:47:40,896 INFO [RS:0;6f1b912b0816:37979 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:47:40,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37979-0x101aa09785e0001, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:47:40,896 INFO [RS:0;6f1b912b0816:37979 {}] regionserver.HRegionServer(1031): Exiting; stopping=6f1b912b0816,37979,1733456815684; zookeeper connection closed. 2024-12-06T03:47:40,896 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c0a1fb2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c0a1fb2 2024-12-06T03:47:40,897 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-06T03:47:40,906 DEBUG [M:0;6f1b912b0816:44365 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9b753b8bb0e4c8ab4d8edf65faa572c is 52, key is load_balancer_on/state:d/1733456817138/Put/seqid=0 2024-12-06T03:47:40,907 WARN [Thread-1090 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:47:40,907 WARN [Thread-1090 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1883769982-172.17.0.2-1733456813785:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK], DatanodeInfoWithStorage[127.0.0.1:45127,DS-00f967fa-f0d1-416d-ba25-6509174d04b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK]) is bad. 2024-12-06T03:47:40,908 WARN [Thread-1090 {}] hdfs.DataStreamer(1850): Abandoning BP-1883769982-172.17.0.2-1733456813785:blk_1073741897_1083 2024-12-06T03:47:40,908 WARN [Thread-1090 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36403,DS-b1d2fb61-5534-4164-9d81-55e45dcb2d9c,DISK] 2024-12-06T03:47:40,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741898_1084 (size=5056) 2024-12-06T03:47:40,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741898_1084 (size=5056) 2024-12-06T03:47:40,912 INFO [M:0;6f1b912b0816:44365 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9b753b8bb0e4c8ab4d8edf65faa572c 2024-12-06T03:47:40,918 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4db7ed422ae04c5cbc929ebb217dafd8 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4db7ed422ae04c5cbc929ebb217dafd8 2024-12-06T03:47:40,923 INFO [M:0;6f1b912b0816:44365 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4db7ed422ae04c5cbc929ebb217dafd8, entries=8, sequenceid=60, filesize=5.5 K 2024-12-06T03:47:40,924 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bb25451c856f4661be5fc41b84086da7 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb25451c856f4661be5fc41b84086da7 2024-12-06T03:47:40,930 INFO [M:0;6f1b912b0816:44365 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bb25451c856f4661be5fc41b84086da7 2024-12-06T03:47:40,930 INFO [M:0;6f1b912b0816:44365 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bb25451c856f4661be5fc41b84086da7, entries=6, sequenceid=60, filesize=6.1 K 2024-12-06T03:47:40,931 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ff007e4538a412dbdd06993a99a20f1 as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7ff007e4538a412dbdd06993a99a20f1 2024-12-06T03:47:40,936 INFO [M:0;6f1b912b0816:44365 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7ff007e4538a412dbdd06993a99a20f1, entries=2, sequenceid=60, filesize=5.1 K 2024-12-06T03:47:40,937 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9b753b8bb0e4c8ab4d8edf65faa572c as hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d9b753b8bb0e4c8ab4d8edf65faa572c 2024-12-06T03:47:40,942 INFO [M:0;6f1b912b0816:44365 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d9b753b8bb0e4c8ab4d8edf65faa572c, entries=1, sequenceid=60, filesize=4.9 K 2024-12-06T03:47:40,943 INFO [M:0;6f1b912b0816:44365 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=60, compaction requested=false 2024-12-06T03:47:40,945 INFO [M:0;6f1b912b0816:44365 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:47:40,945 DEBUG [M:0;6f1b912b0816:44365 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456860819Disabling compacts and flushes for region at 1733456860819Disabling writes for close at 1733456860820 (+1 ms)Obtaining lock to block concurrent updates at 1733456860820Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733456860820Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1733456860820Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733456860821 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733456860821Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733456860836 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733456860836Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733456860846 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733456860859 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733456860859Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733456860870 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733456860882 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733456860882Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733456860892 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733456860905 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733456860905Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75fe10a8: reopening flushed file at 1733456860917 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@744f0c91: reopening flushed file at 1733456860923 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@658ecc1d: reopening flushed file at 1733456860930 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38a5f975: reopening flushed file at 1733456860936 (+6 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=60, compaction requested=false at 1733456860943 (+7 ms)Writing region close event to WAL at 1733456860945 (+2 ms)Closed at 1733456860945 2024-12-06T03:47:40,945 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,945 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,945 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,945 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,945 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:40,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42037 is added to blk_1073741879_1062 (size=1045) 2024-12-06T03:47:40,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45127 is added to blk_1073741879_1062 (size=1045) 2024-12-06T03:47:40,948 INFO [M:0;6f1b912b0816:44365 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-06T03:47:40,948 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T03:47:40,948 INFO [M:0;6f1b912b0816:44365 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44365 2024-12-06T03:47:40,948 INFO [M:0;6f1b912b0816:44365 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:47:41,079 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T03:47:41,080 INFO [M:0;6f1b912b0816:44365 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:47:41,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:47:41,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44365-0x101aa09785e0000, quorum=127.0.0.1:56815, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:47:41,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74e983d1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:41,087 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@48ed1e93{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:41,087 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:41,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@721d4444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:41,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c767c56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:41,088 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@12c27034 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1883769982-172.17.0.2-1733456813785:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46451,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:41371 , LocalHost:localPort 6f1b912b0816/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-06T03:47:41,089 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1883769982-172.17.0.2-1733456813785 (Datanode Uuid b55de3f7-2a18-4d64-98a6-55c777f5975c) service to localhost/127.0.0.1:46387 2024-12-06T03:47:41,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data3/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:41,090 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@12c27034 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42037,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1883769982-172.17.0.2-1733456813785 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:41,090 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@12c27034 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46451,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1883769982-172.17.0.2-1733456813785 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:47:41,090 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data4/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:41,090 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@12c27034 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:42037,null,null], DatanodeInfoWithStorage[127.0.0.1:46451,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1883769982-172.17.0.2-1733456813785:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:42037,null,null], DatanodeInfoWithStorage[127.0.0.1:46451,null,null]] 2024-12-06T03:47:41,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:41,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:41,098 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:41,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:41,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:41,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:41,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:41,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:41,115 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:47:41,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a993d46{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:41,117 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ae47adb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:41,117 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:41,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@740bf9ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:41,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f70e325{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:41,119 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T03:47:41,119 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:47:41,119 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:47:41,119 WARN [BP-1883769982-172.17.0.2-1733456813785 heartbeating to localhost/127.0.0.1:46387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1883769982-172.17.0.2-1733456813785 (Datanode Uuid e696cafe-43c2-49d4-a640-d382b189f5dd) service to localhost/127.0.0.1:46387 2024-12-06T03:47:41,119 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data9/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:41,119 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/cluster_60fe585e-4909-6df3-516f-008292ae6be5/data/data10/current/BP-1883769982-172.17.0.2-1733456813785 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:41,120 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:47:41,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c43bbf6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:47:41,125 INFO [Time-limited test {}] 
server.AbstractConnector(383): Stopped ServerConnector@4b422132{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:41,125 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:41,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75b4bf6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:41,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7db14741{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:41,134 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T03:47:41,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T03:47:41,170 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:46387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46387 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46387 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially 
hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$901/0x00007f0940bf5000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:46387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46535 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$901/0x00007f0940bf5000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46535 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46387 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46387 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread 
LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=216 (was 191) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7790 (was 8763)
2024-12-06T03:47:41,176 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=216, ProcessCount=11, AvailableMemoryMB=7790
2024-12-06T03:47:41,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.log.dir so I do NOT create it in target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7c0cbc63-85de-b3b7-e3ab-d7c8c7acf8b6/hadoop.tmp.dir so I do NOT create it in target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170, deleteOnExit=true
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/test.cache.data in system properties and HBase conf
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.tmp.dir in system properties and HBase conf
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir in system properties and HBase conf
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-06T03:47:41,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
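The HBaseTestingUtil(805) entry above records the topology used for this test: one master, one region server, two HDFS DataNodes, and a single ZooKeeper node. As a rough, hypothetical sketch (not taken from TestLogRolling itself), a run like this is produced by building a StartMiniClusterOption and handing it to HBaseTestingUtil; the builder method names below mirror the fields printed in the log, while the startMiniCluster/shutdownMiniCluster entry points are assumed.

// Hypothetical sketch, not HBase test source: how the topology reported in the
// StartMiniClusterOption{...} log line above is typically requested.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)          // one HMaster, as in the log
        .numRegionServers(1)    // one MiniHBaseClusterRegionServer
        .numDataNodes(2)        // two DataNodes back the WAL pipeline
        .numZkServers(1)        // single MiniZooKeeperCluster node
        .createRootDir(false)
        .createWALDir(false)
        .build();
    util.startMiniCluster(option);   // corresponds to the "STARTING DFS" / property lines above
    try {
      // test body would write WAL entries and restart the HDFS pipeline here
    } finally {
      util.shutdownMiniCluster();    // teardown; ResourceChecker then emits the "after:" report
    }
  }
}

As the surrounding entries show, ResourceChecker snapshots thread, file-descriptor, and memory counts at "before:" and compares them again after teardown, which is what produces the "Thread LEAK?" and "OpenFileDescriptor LEAK?" markers and the long "Potentially hanging thread" dump preceding this block.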
2024-12-06T03:47:41,178 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T03:47:41,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:47:41,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:47:41,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T03:47:41,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:47:41,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T03:47:41,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T03:47:41,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:47:41,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:47:41,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T03:47:41,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/nfs.dump.dir in system properties and HBase conf 2024-12-06T03:47:41,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/java.io.tmpdir in system properties and HBase conf 2024-12-06T03:47:41,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:47:41,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T03:47:41,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T03:47:41,190 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:47:41,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:41,470 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:41,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:41,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:41,471 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:47:41,472 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:41,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15d59233{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:41,473 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67b3f925{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:41,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:41,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:41,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@633966fa{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/java.io.tmpdir/jetty-localhost-35085-hadoop-hdfs-3_4_1-tests_jar-_-any-6586214732948390683/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:47:41,561 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@603c75b8{HTTP/1.1, (http/1.1)}{localhost:35085} 2024-12-06T03:47:41,561 INFO [Time-limited test {}] server.Server(415): Started @154426ms 2024-12-06T03:47:41,571 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:47:41,786 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:41,789 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:41,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:41,790 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:41,790 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:47:41,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b5810d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:41,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@422509aa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:41,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3afb89f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/java.io.tmpdir/jetty-localhost-44689-hadoop-hdfs-3_4_1-tests_jar-_-any-14605348663906084637/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:41,881 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7202cad{HTTP/1.1, (http/1.1)}{localhost:44689} 2024-12-06T03:47:41,881 INFO [Time-limited test {}] server.Server(415): Started @154746ms 2024-12-06T03:47:41,882 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:47:41,910 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:41,914 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:41,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:41,918 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:41,918 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:47:41,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2663ec31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:41,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f80a83e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:42,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70a7bee3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/java.io.tmpdir/jetty-localhost-42309-hadoop-hdfs-3_4_1-tests_jar-_-any-14774268757040816212/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:42,010 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@53796232{HTTP/1.1, (http/1.1)}{localhost:42309} 2024-12-06T03:47:42,010 INFO [Time-limited test {}] server.Server(415): Started @154875ms 2024-12-06T03:47:42,012 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:47:42,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:42,546 WARN [Thread-1185 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data1/current/BP-341572201-172.17.0.2-1733456861200/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:42,546 WARN [Thread-1186 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data2/current/BP-341572201-172.17.0.2-1733456861200/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:42,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:42,565 WARN [Thread-1149 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:47:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bd09706824b9f4c with lease ID 0x24e415818186dc3c: Processing first storage report for DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59 from datanode DatanodeRegistration(127.0.0.1:44745, datanodeUuid=3bd61f82-a77d-402f-95dc-f24115d0445c, infoPort=35221, infoSecurePort=0, ipcPort=45525, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200) 2024-12-06T03:47:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bd09706824b9f4c with lease ID 0x24e415818186dc3c: from storage DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59 node DatanodeRegistration(127.0.0.1:44745, datanodeUuid=3bd61f82-a77d-402f-95dc-f24115d0445c, infoPort=35221, infoSecurePort=0, ipcPort=45525, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bd09706824b9f4c with lease ID 0x24e415818186dc3c: Processing first storage report for DS-5c6d1915-913f-4f4b-b818-2314e0cc3d6e from datanode DatanodeRegistration(127.0.0.1:44745, datanodeUuid=3bd61f82-a77d-402f-95dc-f24115d0445c, infoPort=35221, infoSecurePort=0, ipcPort=45525, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200) 2024-12-06T03:47:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bd09706824b9f4c with lease ID 0x24e415818186dc3c: from storage DS-5c6d1915-913f-4f4b-b818-2314e0cc3d6e node DatanodeRegistration(127.0.0.1:44745, datanodeUuid=3bd61f82-a77d-402f-95dc-f24115d0445c, infoPort=35221, infoSecurePort=0, ipcPort=45525, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:42,691 WARN [Thread-1196 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data3/current/BP-341572201-172.17.0.2-1733456861200/current, will proceed with Du for space computation calculation, 2024-12-06T03:47:42,691 WARN [Thread-1197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data4/current/BP-341572201-172.17.0.2-1733456861200/current, will proceed with Du for space computation calculation, 
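The repeated RecoverLeaseFSUtils(258) warnings in this stretch all share one shape: "java.lang.reflect.InvocationTargetException: null" wrapping "java.io.IOException: Filesystem closed". The null message is a side effect of the reflective call visible in the traces (Method.invoke on isFileClosed): reflection wraps whatever the target method throws, and the wrapper itself carries no message, so the real failure only appears under "Caused by". A minimal, self-contained illustration follows; the class and path names are invented for the demo and are not HBase code.

// Illustrative only: why a reflective call logs "InvocationTargetException: null"
// with the real IOException nested as the cause.
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrappingDemo {
  // Stand-in for DFSClient.isFileClosed() after the client has been shut down.
  public static boolean isFileClosed(String path) throws IOException {
    throw new IOException("Filesystem closed");
  }

  public static void main(String[] args) throws Exception {
    Method m = ReflectionWrappingDemo.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(null, "/some/wal/file");
    } catch (InvocationTargetException e) {
      System.out.println("wrapper message: " + e.getMessage()); // prints: null
      System.out.println("real cause: " + e.getCause());        // java.io.IOException: Filesystem closed
    }
  }
}

Unwrapping via getCause() is the standard way to recover the original exception when logging or re-throwing from such reflective call sites.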
2024-12-06T03:47:42,711 WARN [Thread-1172 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:47:42,713 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd81f171f97243c0 with lease ID 0x24e415818186dc3d: Processing first storage report for DS-ab410678-cb87-4057-8898-fe30977ecfa4 from datanode DatanodeRegistration(127.0.0.1:46467, datanodeUuid=ec612081-1d55-4bcf-a279-2bf13117f3dd, infoPort=37559, infoSecurePort=0, ipcPort=38147, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200) 2024-12-06T03:47:42,713 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd81f171f97243c0 with lease ID 0x24e415818186dc3d: from storage DS-ab410678-cb87-4057-8898-fe30977ecfa4 node DatanodeRegistration(127.0.0.1:46467, datanodeUuid=ec612081-1d55-4bcf-a279-2bf13117f3dd, infoPort=37559, infoSecurePort=0, ipcPort=38147, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:42,713 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd81f171f97243c0 with lease ID 0x24e415818186dc3d: Processing first storage report for DS-ccea4173-28c0-4dde-8c1a-f508b6510ce3 from datanode DatanodeRegistration(127.0.0.1:46467, datanodeUuid=ec612081-1d55-4bcf-a279-2bf13117f3dd, infoPort=37559, infoSecurePort=0, ipcPort=38147, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200) 2024-12-06T03:47:42,713 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd81f171f97243c0 with lease ID 0x24e415818186dc3d: from storage DS-ccea4173-28c0-4dde-8c1a-f508b6510ce3 node DatanodeRegistration(127.0.0.1:46467, datanodeUuid=ec612081-1d55-4bcf-a279-2bf13117f3dd, infoPort=37559, infoSecurePort=0, ipcPort=38147, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:42,738 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41 2024-12-06T03:47:42,741 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/zookeeper_0, clientPort=61821, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T03:47:42,742 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61821 2024-12-06T03:47:42,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:47:42,744 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:47:42,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:47:42,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:47:42,757 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8 with version=8 2024-12-06T03:47:42,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase-staging 2024-12-06T03:47:42,758 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:47:42,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:47:42,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:47:42,759 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:47:42,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:47:42,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:47:42,759 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T03:47:42,759 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:47:42,760 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44283 2024-12-06T03:47:42,761 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44283 connecting to ZooKeeper ensemble=127.0.0.1:61821 2024-12-06T03:47:42,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:442830x0, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:47:42,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44283-0x101aa0a30d90000 connected 2024-12-06T03:47:42,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:47:42,880 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:47:42,883 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:47:42,884 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8, hbase.cluster.distributed=false 2024-12-06T03:47:42,886 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:47:42,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44283 2024-12-06T03:47:42,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44283 2024-12-06T03:47:42,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44283 2024-12-06T03:47:42,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44283 2024-12-06T03:47:42,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44283 2024-12-06T03:47:42,907 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:47:42,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:47:42,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:47:42,907 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:47:42,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:47:42,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:47:42,907 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T03:47:42,907 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:47:42,908 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34777 2024-12-06T03:47:42,909 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34777 connecting to ZooKeeper ensemble=127.0.0.1:61821 2024-12-06T03:47:42,910 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:47:42,911 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:47:42,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347770x0, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:47:42,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:347770x0, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:47:42,920 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34777-0x101aa0a30d90001 connected 2024-12-06T03:47:42,920 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T03:47:42,920 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T03:47:42,921 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T03:47:42,922 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:47:42,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34777 2024-12-06T03:47:42,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34777 2024-12-06T03:47:42,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34777 2024-12-06T03:47:42,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34777 2024-12-06T03:47:42,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34777 2024-12-06T03:47:42,936 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6f1b912b0816:44283 2024-12-06T03:47:42,936 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6f1b912b0816,44283,1733456862758 2024-12-06T03:47:42,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:47:42,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-06T03:47:42,945 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6f1b912b0816,44283,1733456862758 2024-12-06T03:47:42,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T03:47:42,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:42,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:42,953 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T03:47:42,954 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6f1b912b0816,44283,1733456862758 from backup master directory 2024-12-06T03:47:42,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6f1b912b0816,44283,1733456862758 2024-12-06T03:47:42,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:47:42,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:47:42,961 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T03:47:42,961 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6f1b912b0816,44283,1733456862758 2024-12-06T03:47:42,965 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/hbase.id] with ID: 3b3a644c-f47b-4506-bef8-727b0c600a40 2024-12-06T03:47:42,965 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/.tmp/hbase.id 2024-12-06T03:47:42,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:47:42,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:47:42,972 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/.tmp/hbase.id]:[hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/hbase.id] 2024-12-06T03:47:42,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T03:47:42,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:47:42,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:47:42,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T03:47:42,983 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:47:42,983 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T03:47:42,984 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-06T03:47:42,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:42,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:43,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:47:43,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:47:43,002 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:47:43,003 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T03:47:43,003 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:47:43,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:47:43,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:47:43,414 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store 2024-12-06T03:47:43,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:47:43,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:47:43,433 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:47:43,433 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:47:43,433 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:47:43,433 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:47:43,433 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:47:43,433 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:47:43,433 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T03:47:43,433 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456863433Disabling compacts and flushes for region at 1733456863433Disabling writes for close at 1733456863433Writing region close event to WAL at 1733456863433Closed at 1733456863433 2024-12-06T03:47:43,434 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/.initializing 2024-12-06T03:47:43,434 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758 2024-12-06T03:47:43,437 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C44283%2C1733456862758, suffix=, logDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758, archiveDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/oldWALs, maxLogs=10 2024-12-06T03:47:43,437 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C44283%2C1733456862758.1733456863437 2024-12-06T03:47:43,447 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 2024-12-06T03:47:43,450 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35221:35221),(127.0.0.1/127.0.0.1:37559:37559)] 2024-12-06T03:47:43,451 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:47:43,451 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:47:43,452 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,452 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T03:47:43,456 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:47:43,457 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T03:47:43,458 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:47:43,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T03:47:43,460 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:47:43,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,461 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T03:47:43,461 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:47:43,462 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,463 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,463 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,464 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,464 DEBUG [master/6f1b912b0816:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,465 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T03:47:43,466 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:47:43,468 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:47:43,469 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735649, jitterRate=-0.06457450985908508}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T03:47:43,469 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733456863452Initializing all the Stores at 1733456863453 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456863453Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456863454 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456863454Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456863454Cleaning up temporary data from old regions at 1733456863464 (+10 ms)Region opened successfully at 1733456863469 (+5 ms) 2024-12-06T03:47:43,469 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T03:47:43,473 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74dff5f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:47:43,474 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T03:47:43,474 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T03:47:43,474 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T03:47:43,475 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T03:47:43,475 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T03:47:43,476 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-06T03:47:43,476 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T03:47:43,480 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T03:47:43,481 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T03:47:43,511 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T03:47:43,511 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T03:47:43,512 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T03:47:43,519 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T03:47:43,520 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T03:47:43,521 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T03:47:43,527 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T03:47:43,529 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T03:47:43,536 DEBUG 
[master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T03:47:43,539 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T03:47:43,544 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T03:47:43,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:47:43,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:47:43,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:47:43,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:43,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:43,553 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6f1b912b0816,44283,1733456862758, sessionid=0x101aa0a30d90000, setting cluster-up flag (Was=false) 2024-12-06T03:47:43,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:47:43,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:43,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:43,594 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T03:47:43,595 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,44283,1733456862758 2024-12-06T03:47:43,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:43,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:43,636 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T03:47:43,637 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,44283,1733456862758 2024-12-06T03:47:43,639 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T03:47:43,640 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T03:47:43,640 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T03:47:43,641 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-06T03:47:43,641 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6f1b912b0816,44283,1733456862758 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T03:47:43,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:47:43,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:47:43,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:47:43,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:47:43,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6f1b912b0816:0, corePoolSize=10, maxPoolSize=10 2024-12-06T03:47:43,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:47:43,643 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,645 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733456893645 2024-12-06T03:47:43,646 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T03:47:43,646 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T03:47:43,646 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T03:47:43,646 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T03:47:43,646 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T03:47:43,646 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T03:47:43,646 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:47:43,646 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T03:47:43,646 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,648 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,648 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T03:47:43,651 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T03:47:43,651 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T03:47:43,651 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T03:47:43,658 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T03:47:43,658 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T03:47:43,658 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456863658,5,FailOnTimeoutGroup] 2024-12-06T03:47:43,659 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456863659,5,FailOnTimeoutGroup] 2024-12-06T03:47:43,659 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,659 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T03:47:43,659 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,659 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:47:43,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:47:43,667 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T03:47:43,668 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8 2024-12-06T03:47:43,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:47:43,687 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:47:43,687 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:47:43,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:47:43,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:47:43,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:47:43,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:47:43,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:47:43,696 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:47:43,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:47:43,698 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:47:43,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:47:43,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:47:43,701 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:47:43,701 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:43,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:47:43,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:47:43,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740 2024-12-06T03:47:43,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740 2024-12-06T03:47:43,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:47:43,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:47:43,705 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:47:43,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:47:43,708 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:47:43,708 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694289, jitterRate=-0.11716626584529877}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:47:43,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733456863687Initializing all the Stores at 1733456863688 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456863688Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456863690 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456863690Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456863690Cleaning up temporary data from old regions at 1733456863704 (+14 ms)Region opened successfully at 1733456863709 (+5 ms) 2024-12-06T03:47:43,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:47:43,709 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:47:43,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:47:43,710 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:47:43,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:47:43,710 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:47:43,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456863709Disabling compacts and flushes for region at 1733456863709Disabling writes for close at 1733456863710 (+1 ms)Writing region close event to WAL at 1733456863710Closed at 1733456863710 2024-12-06T03:47:43,711 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:47:43,712 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T03:47:43,712 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T03:47:43,714 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:47:43,715 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T03:47:43,729 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(746): ClusterId : 3b3a644c-f47b-4506-bef8-727b0c600a40 2024-12-06T03:47:43,729 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T03:47:43,744 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T03:47:43,744 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T03:47:43,753 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T03:47:43,754 DEBUG [RS:0;6f1b912b0816:34777 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5db0c3b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:47:43,771 DEBUG [RS:0;6f1b912b0816:34777 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6f1b912b0816:34777 2024-12-06T03:47:43,771 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T03:47:43,771 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T03:47:43,771 DEBUG [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(832): About to register with Master. 
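The hbase:meta table descriptor logged above (FSTableDescriptors creating the descriptor, then HRegion creating 1588230740) is a set of column-family and table attributes: VERSIONS=3, ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, plus the MultiRowMutationEndpoint coprocessor and the DEFAULT store file tracker. As a rough illustration only, and not the code path the master itself runs, the sketch below builds an equivalent descriptor with the public HBase client builder API; the table name 'demo' is made up, since hbase:meta cannot be created from a client.

import java.io.IOException;
import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() throws IOException {
    // Column family settings mirroring the 'info' family logged above:
    // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setKeepDeletedCells(KeepDeletedCells.FALSE)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // Table-level attributes: the multi-row mutation coprocessor and the
    // default store file tracker, as shown in the logged TABLE_ATTRIBUTES.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .build();
  }
}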
2024-12-06T03:47:43,772 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,44283,1733456862758 with port=34777, startcode=1733456862907 2024-12-06T03:47:43,772 DEBUG [RS:0;6f1b912b0816:34777 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T03:47:43,774 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34161, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T03:47:43,775 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44283 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6f1b912b0816,34777,1733456862907 2024-12-06T03:47:43,775 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44283 {}] master.ServerManager(517): Registering regionserver=6f1b912b0816,34777,1733456862907 2024-12-06T03:47:43,777 DEBUG [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8 2024-12-06T03:47:43,777 DEBUG [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46537 2024-12-06T03:47:43,777 DEBUG [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T03:47:43,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:47:43,786 DEBUG [RS:0;6f1b912b0816:34777 {}] zookeeper.ZKUtil(111): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6f1b912b0816,34777,1733456862907 2024-12-06T03:47:43,786 WARN [RS:0;6f1b912b0816:34777 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:47:43,786 INFO [RS:0;6f1b912b0816:34777 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:47:43,787 DEBUG [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907 2024-12-06T03:47:43,787 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6f1b912b0816,34777,1733456862907] 2024-12-06T03:47:43,789 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T03:47:43,791 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T03:47:43,791 INFO [RS:0;6f1b912b0816:34777 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:47:43,791 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
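The PressureAwareCompactionThroughputController and MemStoreFlusher lines above report the effective compaction throughput bounds (100 MB/s upper, 50 MB/s lower, 60 s tuning period) and the global memstore limits (880 M with an 836 M low-water mark). A minimal sketch of how such values are typically driven from configuration follows; the property names are the standard ones to the best of my knowledge and should be treated as assumptions rather than something stated in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ThroughputConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Compaction throughput bounds tuned by PressureAwareCompactionThroughputController
    // (100 MB/s upper, 50 MB/s lower, 60 s tuning period, as logged).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
    // Global memstore limit: 0.4 of heap, with a low-water mark at 95% of that,
    // which is consistent with the logged 880 M / 836 M pair on this JVM's heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    return conf;
  }
}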
2024-12-06T03:47:43,791 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T03:47:43,792 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T03:47:43,792 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,792 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,793 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,793 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,793 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:47:43,793 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:47:43,793 DEBUG [RS:0;6f1b912b0816:34777 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:47:43,794 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
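The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines come from ChoreService scheduling periodic tasks such as CompactionChecker and MemstoreFlusherChore. The sketch below shows the ScheduledChore/ChoreService pattern in isolation; ChoreService is HBase-internal API, and the chore name, period, and Stoppable here are invented for illustration.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Trivial Stoppable so the chore has something to check for shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // A chore that runs once per second, like the CompactionChecker entries above.
    ScheduledChore heartbeat =
        new ScheduledChore("DemoChore", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };

    ChoreService service = new ChoreService("demo-chore-service");
    // Scheduling a chore is what produces the "... is enabled." lines seen above.
    service.scheduleChore(heartbeat);
    Thread.sleep(3000);
    service.shutdown();
  }
}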
2024-12-06T03:47:43,794 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,794 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,794 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,794 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,794 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,34777,1733456862907-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:47:43,807 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T03:47:43,807 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,34777,1733456862907-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,807 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,807 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.Replication(171): 6f1b912b0816,34777,1733456862907 started 2024-12-06T03:47:43,821 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:43,821 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1482): Serving as 6f1b912b0816,34777,1733456862907, RpcServer on 6f1b912b0816/172.17.0.2:34777, sessionid=0x101aa0a30d90001 2024-12-06T03:47:43,821 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T03:47:43,821 DEBUG [RS:0;6f1b912b0816:34777 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6f1b912b0816,34777,1733456862907 2024-12-06T03:47:43,821 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,34777,1733456862907' 2024-12-06T03:47:43,821 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T03:47:43,822 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T03:47:43,822 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T03:47:43,822 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T03:47:43,822 DEBUG [RS:0;6f1b912b0816:34777 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6f1b912b0816,34777,1733456862907 2024-12-06T03:47:43,822 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,34777,1733456862907' 2024-12-06T03:47:43,822 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T03:47:43,823 DEBUG 
[RS:0;6f1b912b0816:34777 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T03:47:43,823 DEBUG [RS:0;6f1b912b0816:34777 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T03:47:43,823 INFO [RS:0;6f1b912b0816:34777 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T03:47:43,823 INFO [RS:0;6f1b912b0816:34777 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T03:47:43,865 WARN [6f1b912b0816:44283 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-06T03:47:43,925 INFO [RS:0;6f1b912b0816:34777 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C34777%2C1733456862907, suffix=, logDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907, archiveDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/oldWALs, maxLogs=32 2024-12-06T03:47:43,926 INFO [RS:0;6f1b912b0816:34777 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C34777%2C1733456862907.1733456863926 2024-12-06T03:47:43,932 INFO [RS:0;6f1b912b0816:34777 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 2024-12-06T03:47:43,934 DEBUG [RS:0;6f1b912b0816:34777 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37559:37559),(127.0.0.1/127.0.0.1:35221:35221)] 2024-12-06T03:47:44,116 DEBUG [6f1b912b0816:44283 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T03:47:44,116 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6f1b912b0816,34777,1733456862907 2024-12-06T03:47:44,118 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,34777,1733456862907, state=OPENING 2024-12-06T03:47:44,152 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T03:47:44,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:44,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:47:44,162 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:47:44,162 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:47:44,162 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:47:44,162 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,34777,1733456862907}] 2024-12-06T03:47:44,315 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T03:47:44,317 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42593, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T03:47:44,321 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T03:47:44,321 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:47:44,323 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C34777%2C1733456862907.meta, suffix=.meta, logDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907, archiveDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/oldWALs, maxLogs=32 2024-12-06T03:47:44,324 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta 2024-12-06T03:47:44,332 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta 2024-12-06T03:47:44,338 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35221:35221),(127.0.0.1/127.0.0.1:37559:37559)] 2024-12-06T03:47:44,339 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:47:44,340 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T03:47:44,340 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T03:47:44,340 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
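The two "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" records above describe the FSHLog settings for the region server WAL and the meta WAL. Assuming the standard property names (an assumption, not something stated in this log), those numbers map to configuration roughly as follows; note that the roll size is derived as blocksize times the roll multiplier, so 256 MB * 0.5 gives the logged 128 MB.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size; roll size = blocksize * multiplier (256 MB * 0.5 = 128 MB).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on un-archived WAL files before flushes are forced (maxLogs=32 above).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}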
2024-12-06T03:47:44,340 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T03:47:44,340 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:47:44,340 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T03:47:44,340 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T03:47:44,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:47:44,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:47:44,346 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:44,346 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:47:44,346 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:47:44,347 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:47:44,347 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:44,348 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:47:44,348 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:47:44,348 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:47:44,348 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:44,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:47:44,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:47:44,350 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:47:44,350 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:47:44,350 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
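Each store opener above logs the same CompactionConfiguration: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), weekly major compactions with 0.5 jitter, and ExploringCompactionPolicy. A small configuration sketch with the corresponding hbase-site properties is given below; the values mirror the logged ones, which match the stock defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // File-count window: compact when 3..10 eligible store files exist.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size threshold and selection ratios from the logged CompactionConfiguration.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Major compactions weekly, with 50% jitter to spread them out.
    conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}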
2024-12-06T03:47:44,350 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:47:44,351 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740 2024-12-06T03:47:44,352 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740 2024-12-06T03:47:44,354 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:47:44,354 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:47:44,354 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:47:44,356 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:47:44,357 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795817, jitterRate=0.011934757232666016}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:47:44,357 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T03:47:44,357 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733456864340Writing region info on filesystem at 1733456864340Initializing all the Stores at 1733456864341 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456864341Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456864345 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456864345Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456864345Cleaning up temporary data from old regions at 1733456864354 (+9 ms)Running coprocessor post-open hooks at 1733456864357 (+3 ms)Region opened successfully at 1733456864357 2024-12-06T03:47:44,358 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733456864315 2024-12-06T03:47:44,360 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T03:47:44,361 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T03:47:44,361 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,34777,1733456862907 2024-12-06T03:47:44,362 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,34777,1733456862907, state=OPEN 2024-12-06T03:47:44,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:47:44,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:47:44,497 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6f1b912b0816,34777,1733456862907 2024-12-06T03:47:44,497 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:47:44,497 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:47:44,501 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T03:47:44,501 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,34777,1733456862907 in 335 msec 2024-12-06T03:47:44,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T03:47:44,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 789 msec 2024-12-06T03:47:44,504 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:47:44,504 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T03:47:44,506 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:47:44,506 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,34777,1733456862907, seqNum=-1] 2024-12-06T03:47:44,506 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:47:44,508 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40651, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:47:44,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 873 msec 2024-12-06T03:47:44,514 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733456864514, completionTime=-1 2024-12-06T03:47:44,514 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T03:47:44,514 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-06T03:47:44,516 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-06T03:47:44,516 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733456924516 2024-12-06T03:47:44,517 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733456984517 2024-12-06T03:47:44,517 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-06T03:47:44,517 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44283,1733456862758-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:44,517 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44283,1733456862758-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:44,517 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44283,1733456862758-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:44,517 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6f1b912b0816:44283, period=300000, unit=MILLISECONDS is enabled. 
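InitMetaProcedure above reports that it is about to create the 'default' and 'hbase' namespaces; the master does this internally through the procedure framework. For comparison, user namespaces are created through the client Admin API, sketched below with a made-up namespace name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Client-side equivalent of creating a namespace; 'demo_ns' is invented.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
    }
  }
}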
2024-12-06T03:47:44,517 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:44,517 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T03:47:44,519 DEBUG [master/6f1b912b0816:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T03:47:44,521 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.560sec 2024-12-06T03:47:44,521 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T03:47:44,521 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T03:47:44,521 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T03:47:44,521 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T03:47:44,521 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T03:47:44,521 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44283,1733456862758-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:47:44,522 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44283,1733456862758-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T03:47:44,524 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T03:47:44,524 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T03:47:44,524 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44283,1733456862758-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
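At this point the master reports that it has completed initialization, and the lines that follow show the test harness (HBaseTestingUtil) declaring the minicluster up before moving on to testLogRollOnPipelineRestart. A bare-bones sketch of driving such a minicluster from a test is shown below; it assumes the HBaseTestingUtil API mirrors the long-standing HBaseTestingUtility one (startMiniCluster/shutdownMiniCluster), which should be treated as an assumption.

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    try {
      // Starts an in-process ZooKeeper, HDFS, master, and region server,
      // producing a startup sequence like the one captured in this log.
      util.startMiniCluster();
      // ... test logic would go here, e.g. against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}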
2024-12-06T03:47:44,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e6faf55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-06T03:47:44,529 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6f1b912b0816,44283,-1 for getting cluster id
2024-12-06T03:47:44,529 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-06T03:47:44,531 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3b3a644c-f47b-4506-bef8-727b0c600a40'
2024-12-06T03:47:44,531 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-06T03:47:44,531 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3b3a644c-f47b-4506-bef8-727b0c600a40"
2024-12-06T03:47:44,532 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c3e09de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-06T03:47:44,532 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6f1b912b0816,44283,-1]
2024-12-06T03:47:44,532 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-06T03:47:44,532 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T03:47:44,534 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35248, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-06T03:47:44,535 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d31bf5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-06T03:47:44,535 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-06T03:47:44,536 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,34777,1733456862907, seqNum=-1]
2024-12-06T03:47:44,536 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-06T03:47:44,537 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44236, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-06T03:47:44,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6f1b912b0816,44283,1733456862758
2024-12-06T03:47:44,539 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-06T03:47:44,542 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-06T03:47:44,542 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart
2024-12-06T03:47:44,542 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2
2024-12-06T03:47:44,542 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-12-06T03:47:44,543 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 6f1b912b0816,44283,1733456862758
2024-12-06T03:47:44,543 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1357bafd
2024-12-06T03:47:44,543 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-06T03:47:44,545 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35252, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-06T03:47:44,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44283 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-06T03:47:44,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44283 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-06T03:47:44,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44283 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-06T03:47:44,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-06T03:47:44,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44283 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart
2024-12-06T03:47:44,549 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION
2024-12-06T03:47:44,549 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T03:47:44,549 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44283 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4
2024-12-06T03:47:44,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-06T03:47:44,550 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-06T03:47:44,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-06T03:47:44,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741835_1011 (size=395)
2024-12-06T03:47:44,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741835_1011 (size=395)
2024-12-06T03:47:44,560 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5b842cd62a62f90f865e01e7049cb2df, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8
2024-12-06T03:47:44,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46467 is added to blk_1073741836_1012 (size=78)
2024-12-06T03:47:44,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44745 is added to blk_1073741836_1012 (size=78)
2024-12-06T03:47:44,986 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-06T03:47:44,986 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 5b842cd62a62f90f865e01e7049cb2df, disabling compactions & flushes
2024-12-06T03:47:44,986 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.
2024-12-06T03:47:44,986 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.
2024-12-06T03:47:44,986 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df. after waiting 0 ms
2024-12-06T03:47:44,986 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.
2024-12-06T03:47:44,986 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.
2024-12-06T03:47:44,986 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5b842cd62a62f90f865e01e7049cb2df: Waiting for close lock at 1733456864986Disabling compacts and flushes for region at 1733456864986Disabling writes for close at 1733456864986Writing region close event to WAL at 1733456864986Closed at 1733456864986
2024-12-06T03:47:44,988 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META
2024-12-06T03:47:44,988 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733456864988"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733456864988"}]},"ts":"1733456864988"}
2024-12-06T03:47:44,991 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-12-06T03:47:44,992 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-06T03:47:44,993 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456864993"}]},"ts":"1733456864993"}
2024-12-06T03:47:44,995 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta
2024-12-06T03:47:44,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5b842cd62a62f90f865e01e7049cb2df, ASSIGN}]
2024-12-06T03:47:44,997 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5b842cd62a62f90f865e01e7049cb2df, ASSIGN
2024-12-06T03:47:44,999 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5b842cd62a62f90f865e01e7049cb2df, ASSIGN; state=OFFLINE, location=6f1b912b0816,34777,1733456862907; forceNewPlan=false, retain=false
2024-12-06T03:47:45,150 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5b842cd62a62f90f865e01e7049cb2df, regionState=OPENING, regionLocation=6f1b912b0816,34777,1733456862907
2024-12-06T03:47:45,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5b842cd62a62f90f865e01e7049cb2df, ASSIGN because future has completed
2024-12-06T03:47:45,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b842cd62a62f90f865e01e7049cb2df, server=6f1b912b0816,34777,1733456862907}]
2024-12-06T03:47:45,311 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.
2024-12-06T03:47:45,311 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5b842cd62a62f90f865e01e7049cb2df, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.', STARTKEY => '', ENDKEY => ''}
2024-12-06T03:47:45,311 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,311 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-06T03:47:45,311 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,311 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,313 INFO [StoreOpener-5b842cd62a62f90f865e01e7049cb2df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,314 INFO [StoreOpener-5b842cd62a62f90f865e01e7049cb2df-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5b842cd62a62f90f865e01e7049cb2df columnFamilyName info
2024-12-06T03:47:45,314 DEBUG [StoreOpener-5b842cd62a62f90f865e01e7049cb2df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-06T03:47:45,315 INFO [StoreOpener-5b842cd62a62f90f865e01e7049cb2df-1 {}] regionserver.HStore(327): Store=5b842cd62a62f90f865e01e7049cb2df/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-06T03:47:45,315 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,316 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,316 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,317 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,317 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,318 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,322 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-06T03:47:45,323 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5b842cd62a62f90f865e01e7049cb2df; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859587, jitterRate=0.09302237629890442}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-06T03:47:45,323 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5b842cd62a62f90f865e01e7049cb2df
2024-12-06T03:47:45,324 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5b842cd62a62f90f865e01e7049cb2df: Running coprocessor pre-open hook at 1733456865312Writing region info on filesystem at 1733456865312Initializing all the Stores at 1733456865312Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456865312Cleaning up temporary data from old regions at 1733456865317 (+5 ms)Running coprocessor post-open hooks at 1733456865323 (+6 ms)Region opened successfully at 1733456865324 (+1 ms)
2024-12-06T03:47:45,325 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df., pid=6, masterSystemTime=1733456865306
2024-12-06T03:47:45,328 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.
2024-12-06T03:47:45,328 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.
2024-12-06T03:47:45,329 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5b842cd62a62f90f865e01e7049cb2df, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,34777,1733456862907
2024-12-06T03:47:45,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b842cd62a62f90f865e01e7049cb2df, server=6f1b912b0816,34777,1733456862907 because future has completed
2024-12-06T03:47:45,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-06T03:47:45,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5b842cd62a62f90f865e01e7049cb2df, server=6f1b912b0816,34777,1733456862907 in 181 msec
2024-12-06T03:47:45,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-06T03:47:45,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=5b842cd62a62f90f865e01e7049cb2df, ASSIGN in 343 msec
2024-12-06T03:47:45,345 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-06T03:47:45,345 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456865345"}]},"ts":"1733456865345"}
2024-12-06T03:47:45,348 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta
2024-12-06T03:47:45,350 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION
2024-12-06T03:47:45,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 804 msec
2024-12-06T03:47:45,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:45,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:46,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:46,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:47,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:47,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:48,477 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T03:47:48,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,492 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:47:48,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:48,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:49,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:49,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:49,790 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T03:47:49,790 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-06T03:47:50,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:50,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:51,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:51,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:52,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:52,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:52,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:47:52,975 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-06T03:47:52,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T03:47:52,975 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-06T03:47:52,976 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:47:52,976 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-06T03:47:52,976 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T03:47:52,976 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-06T03:47:53,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:53,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:47:54,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:54,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:54,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T03:47:54,611 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-06T03:47:54,611 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-06T03:47:54,614 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T03:47:54,614 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df. 2024-12-06T03:47:54,618 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df., hostname=6f1b912b0816,34777,1733456862907, seqNum=2] 2024-12-06T03:47:55,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:55,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:56,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:56,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:56,622 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 2024-12-06T03:47:56,622 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:56,622 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46467,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
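The repeated util.RecoverLeaseFSUtils(258) warnings above are HBase's WAL lease-recovery loop polling roughly once per second to ask whether the old writer's file has been closed. The check is made reflectively for Hadoop-version compatibility (hence Method.invoke -> RecoverLeaseFSUtils.isFileClosed in every trace), so the underlying java.io.IOException: Filesystem closed from the already-shut-down DFSClient surfaces wrapped in an InvocationTargetException. The following is only a minimal sketch of that retry shape against the public DistributedFileSystem API, not the HBase implementation; the 60 s timeout and 1 s interval are assumptions.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Minimal sketch of a lease-recovery poll loop (not HBase's RecoverLeaseFSUtils).
public final class LeaseRecoverySketch {
  static boolean waitForClosed(DistributedFileSystem dfs, Path wal) throws InterruptedException {
    long deadline = System.currentTimeMillis() + 60_000L;   // assumed overall timeout
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.recoverLease(wal)) {   // ask the NameNode to (re)claim the lease
          return true;
        }
        if (dfs.isFileClosed(wal)) {   // the method invoked reflectively in the traces above
          return true;
        }
      } catch (IOException e) {
        // With a closed client this is "Filesystem closed"; through reflection it appears
        // as InvocationTargetException with this exception as the cause, as logged above.
      }
      Thread.sleep(1_000L);            // matches the ~1 s spacing of the WARN entries
    }
    return false;
  }
}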
2024-12-06T03:47:56,623 WARN [DataStreamer for file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 block BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK], DatanodeInfoWithStorage[127.0.0.1:46467,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46467,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]) is bad. 2024-12-06T03:47:56,623 WARN [DataStreamer for file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 block BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46467,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK], DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46467,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]) is bad. 2024-12-06T03:47:56,623 WARN [PacketResponder: BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46467] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
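The two hdfs.DataStreamer(1731) "Error Recovery ... is bad" warnings just above are the client-side reaction to the broken pipelines: the DataStreamer drops the unreachable datanode from the pipeline and then, depending on the replace-datanode-on-failure policy, either continues on the surviving replica or asks the NameNode for a replacement. The snippet below only sketches the standard HDFS client properties that govern that decision; the values are illustrative, not taken from this test run.

import org.apache.hadoop.conf.Configuration;

// Sketch of the client-side settings behind the pipeline recovery logged above.
public final class PipelineRecoveryConfSketch {
  public static Configuration exampleConf() {
    Configuration conf = new Configuration();
    // Whether the client may add a replacement datanode when one in the pipeline fails.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only replaces for larger pipelines; NEVER and ALWAYS are the other policies.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // If no replacement can be found, keep writing to the remaining datanodes instead of failing.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}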
2024-12-06T03:47:56,623 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:46467,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:56,623 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_997564354_22 at /127.0.0.1:52928 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44745:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52928 dst: /127.0.0.1:44745 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,623 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:51986 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46467:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51986 dst: /127.0.0.1:46467 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:52976 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44745:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52976 dst: /127.0.0.1:44745 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_997564354_22 at /127.0.0.1:51960 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46467:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51960 dst: /127.0.0.1:46467 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,624 WARN [DataStreamer for file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta block BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK], DatanodeInfoWithStorage[127.0.0.1:46467,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46467,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]) is bad. 2024-12-06T03:47:56,624 WARN [PacketResponder: BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46467] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,625 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:52990 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44745:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52990 dst: /127.0.0.1:44745 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,625 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:51990 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46467:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51990 dst: /127.0.0.1:46467 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70a7bee3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:56,696 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53796232{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:56,696 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:56,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f80a83e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:56,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2663ec31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:56,697 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:47:56,697 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
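At this point the log shows one datanode of the mini cluster being torn down (its Jetty contexts stop, the block-pool service and command processor exit) and, immediately afterwards, a fresh datanode web server starting on a new ephemeral port - the "pipeline restart" that testLogRollOnPipelineRestart exercises. A rough sketch of driving such a bounce from test code against MiniDFSCluster is below; the method names and signatures (restartDataNode, waitActive) are written from memory and should be treated as assumptions, not the test's actual code.

import org.apache.hadoop.hdfs.MiniDFSCluster;

// Rough sketch (assumed API) of restarting a datanode in a running mini cluster,
// corresponding to the stop/start sequence in the surrounding log entries.
public final class DatanodeRestartSketch {
  public static void bounceFirstDataNode(MiniDFSCluster cluster) throws Exception {
    cluster.restartDataNode(0);  // assumed: stops datanode 0 and starts it again in place
    cluster.waitActive();        // assumed: blocks until the cluster reports all nodes live
  }
}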
2024-12-06T03:47:56,697 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-341572201-172.17.0.2-1733456861200 (Datanode Uuid ec612081-1d55-4bcf-a279-2bf13117f3dd) service to localhost/127.0.0.1:46537 2024-12-06T03:47:56,697 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:47:56,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data3/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:56,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data4/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:56,698 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:47:56,708 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:56,711 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:56,712 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:56,712 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:56,712 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:47:56,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56cae33a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:56,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@774f5f60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:56,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64395bd2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/java.io.tmpdir/jetty-localhost-45199-hadoop-hdfs-3_4_1-tests_jar-_-any-13112837754289012679/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:56,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@256e4c63{HTTP/1.1, 
(http/1.1)}{localhost:45199} 2024-12-06T03:47:56,808 INFO [Time-limited test {}] server.Server(415): Started @169673ms 2024-12-06T03:47:56,809 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:47:56,831 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:56,831 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:56,831 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:56,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:43066 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44745:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43066 dst: /127.0.0.1:44745 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:43052 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44745:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43052 dst: /127.0.0.1:44745 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:47:56,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_997564354_22 at /127.0.0.1:43078 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44745:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43078 dst: /127.0.0.1:44745 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
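The burst of EOFException and ClosedChannelException records above is what an open WAL write pipeline logs when a DataNode is stopped underneath it; the restart call itself is not part of this log. A minimal, hypothetical sketch of how such a bounce is typically driven from a MiniDFSCluster-based test (the DataNode index, the keepPort flag, and the method choice are assumptions, not taken from this run):

```java
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DataNodeRestartSketch {
  // Hypothetical sketch, not this test's actual code: bounce a DataNode that is
  // serving an open WAL write pipeline, which produces EOFException /
  // ClosedChannelException records like the ones logged above.
  static void bounceDataNode(MiniDFSCluster cluster) throws Exception {
    // stopDataNode(0) detaches the first DataNode and returns its properties.
    MiniDFSCluster.DataNodeProperties props = cluster.stopDataNode(0);
    // Restart it on the same port (keepPort = true) so clients can reconnect.
    cluster.restartDataNode(props, true);
    cluster.waitActive();
  }
}
```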
2024-12-06T03:47:56,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3afb89f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:56,835 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7202cad{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:47:56,835 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:47:56,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@422509aa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:47:56,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b5810d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,STOPPED} 2024-12-06T03:47:56,836 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:47:56,836 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T03:47:56,836 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:47:56,836 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-341572201-172.17.0.2-1733456861200 (Datanode Uuid 3bd61f82-a77d-402f-95dc-f24115d0445c) service to localhost/127.0.0.1:46537 2024-12-06T03:47:56,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data1/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:56,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data2/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:47:56,837 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:47:56,847 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:47:56,850 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:47:56,851 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:47:56,851 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:47:56,851 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:47:56,851 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75a64b70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:47:56,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cfd0f6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:47:56,956 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66fcf468{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/java.io.tmpdir/jetty-localhost-39123-hadoop-hdfs-3_4_1-tests_jar-_-any-8427630956603431195/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:47:56,956 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@326d1de5{HTTP/1.1, (http/1.1)}{localhost:39123} 2024-12-06T03:47:56,956 INFO [Time-limited test {}] server.Server(415): Started @169821ms 2024-12-06T03:47:56,958 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:47:57,234 WARN [Thread-1320 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:47:57,237 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2095eeeb64b27997 with lease ID 0x24e415818186dc3e: from storage DS-ab410678-cb87-4057-8898-fe30977ecfa4 node DatanodeRegistration(127.0.0.1:39995, datanodeUuid=ec612081-1d55-4bcf-a279-2bf13117f3dd, infoPort=39551, infoSecurePort=0, ipcPort=41231, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:57,237 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2095eeeb64b27997 with lease ID 0x24e415818186dc3e: from storage DS-ccea4173-28c0-4dde-8c1a-f508b6510ce3 node DatanodeRegistration(127.0.0.1:39995, datanodeUuid=ec612081-1d55-4bcf-a279-2bf13117f3dd, infoPort=39551, infoSecurePort=0, ipcPort=41231, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:57,325 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:47:57,327 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f55b2254fbdd703 with lease ID 0x24e415818186dc3f: from storage DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59 node DatanodeRegistration(127.0.0.1:33997, datanodeUuid=3bd61f82-a77d-402f-95dc-f24115d0445c, infoPort=45413, infoSecurePort=0, ipcPort=36789, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T03:47:57,328 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f55b2254fbdd703 with lease ID 0x24e415818186dc3f: from storage DS-5c6d1915-913f-4f4b-b818-2314e0cc3d6e node DatanodeRegistration(127.0.0.1:33997, datanodeUuid=3bd61f82-a77d-402f-95dc-f24115d0445c, infoPort=45413, infoSecurePort=0, ipcPort=36789, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:47:57,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:57,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:47:57,976 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-06T03:47:57,978 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-06T03:47:57,980 ERROR [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8-prefix:6f1b912b0816,34777,1733456862907 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:57,980 WARN [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8-prefix:6f1b912b0816,34777,1733456862907 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
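The "All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,...]] are bad. Aborting..." failures above occur because the pipeline for the old WAL block had shrunk to the single DataNode at 127.0.0.1:44745, and this mini cluster only runs two DataNodes, so no healthy replacement is available once that node is restarted; the WAL is therefore rolled onto a fresh file in the records that follow. The client-side behavior is governed by the standard HDFS replace-datanode-on-failure settings; a sketch with illustrative values (the keys are real HDFS client keys, but the values used by this run are not shown in the log):

```java
import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryConfigSketch {
  // Illustrative values only; the configuration actually used by this run is not in the log.
  static Configuration dfsClientConf() {
    Configuration conf = new Configuration();
    // Allow the client to replace a failed DataNode in an existing write pipeline.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only swaps in a replacement when a spare healthy node exists; with a
    // two-node mini cluster there is no spare, so recovery still ends in
    // "All datanodes [...] are bad. Aborting..." once the last pipeline node restarts.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    return conf;
  }
}
```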
2024-12-06T03:47:57,980 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C34777%2C1733456862907:(num 1733456863926) roll requested 2024-12-06T03:47:57,980 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C34777%2C1733456862907.1733456877980 2024-12-06T03:47:57,986 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 newFile=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 2024-12-06T03:47:57,986 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:57,986 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:57,986 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:57,986 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:57,986 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:47:57,987 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 2024-12-06T03:47:57,987 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:47:57,987 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
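The records that follow show the close path recovering the lease on the old WAL (…1733456863926): the first recoverLease call is rejected while "Lease recovery is in progress" (attempt=0 after 1ms) and a later poll succeeds (attempt=1 after 4001ms). The repeated "Failed invocation … Caused by: java.io.IOException: Filesystem closed" warnings interleaved above and below come from the same kind of polling still running against the earlier filesystem on port 46387, whose DFSClient has already been closed. A minimal sketch of the recover-then-poll pattern against the public DistributedFileSystem API (an illustration of the idea, not HBase's RecoverLeaseFSUtils implementation; the 1-second poll interval is an assumption):

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Minimal sketch: ask the NameNode to recover the lease, then poll until the
  // file is reported closed. Poll interval is illustrative.
  static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean recovered = dfs.recoverLease(wal);   // attempt=0 may fail while recovery is in progress
    while (!recovered && !dfs.isFileClosed(wal)) {
      Thread.sleep(1000L);                       // the run above took ~4s before attempt=1 succeeded
      recovered = dfs.recoverLease(wal);
    }
  }
}
```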
2024-12-06T03:47:57,987 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 2024-12-06T03:47:57,987 WARN [IPC Server handler 4 on default port 46537 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-12-06T03:47:57,988 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 after 1ms 2024-12-06T03:47:57,991 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39551:39551),(127.0.0.1/127.0.0.1:45413:45413)] 2024-12-06T03:47:57,991 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 is not closed yet, will try archiving it next time 2024-12-06T03:47:58,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:58,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:59,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:59,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:47:59,994 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-06T03:48:00,238 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T03:48:00,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:00,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:01,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:01,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:01,988 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 after 4001ms 2024-12-06T03:48:01,997 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:33997,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:01,997 WARN [DataStreamer for file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 block BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39995,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK], DatanodeInfoWithStorage[127.0.0.1:33997,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33997,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]) is bad. 2024-12-06T03:48:01,997 WARN [PacketResponder: BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33997] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] 
at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:48:01,997 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:57554 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39995:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57554 dst: /127.0.0.1:39995 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:48:01,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:39470 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39470 dst: /127.0.0.1:33997 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:48:02,046 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66fcf468{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:48:02,046 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@326d1de5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:48:02,046 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:48:02,046 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cfd0f6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:48:02,046 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75a64b70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,STOPPED} 2024-12-06T03:48:02,047 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:48:02,047 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-341572201-172.17.0.2-1733456861200 (Datanode Uuid 3bd61f82-a77d-402f-95dc-f24115d0445c) service to localhost/127.0.0.1:46537 2024-12-06T03:48:02,047 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
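Between the DataNode bounces the test re-reads the rows it wrote earlier, hence the "Validated row row1002" and "Validated row row1003" records above (AbstractTestLogRolling(330)) confirming the edits survived the broken WAL pipelines. A write-then-verify check of that shape might look like the following sketch (the table, column family, and qualifier names are assumptions, not taken from this log):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowValidationSketch {
  // Assumed names: table "testLogRolling", family "cf", qualifier "q".
  static void writeAndVerify(Connection conn, int i) throws Exception {
    byte[] row = Bytes.toBytes(String.format("row%04d", i)); // e.g. row1002, row1003
    try (Table table = conn.getTable(TableName.valueOf("testLogRolling"))) {
      table.put(new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), row));
      Result r = table.get(new Get(row));
      if (r.isEmpty()) {
        throw new AssertionError("row not durable: " + Bytes.toString(row));
      }
    }
  }
}
```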
2024-12-06T03:48:02,047 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:48:02,047 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data1/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:48:02,048 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data2/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:48:02,048 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:48:02,063 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:48:02,065 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:48:02,067 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:48:02,067 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:48:02,067 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:48:02,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@312832b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:48:02,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@138a9f82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:48:02,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@31767992{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/java.io.tmpdir/jetty-localhost-38209-hadoop-hdfs-3_4_1-tests_jar-_-any-7187948462749100274/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:48:02,170 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2859ae00{HTTP/1.1, (http/1.1)}{localhost:38209} 2024-12-06T03:48:02,170 INFO [Time-limited test {}] server.Server(415): Started @175035ms 2024-12-06T03:48:02,171 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
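The recurring "Unable to initialize FileSignerSecretProvider … Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret" warning is logged by each restarted DataNode HTTP server because that secret file is absent on the build host; Hadoop then falls back to a random in-memory secret, which is harmless for a single-process test. The property involved is hadoop.http.authentication.signature.secret.file; a sketch with an illustrative path (not this run's configuration):

```java
import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
  // Illustrative only: point the HTTP authentication filter at a readable secret
  // file to avoid the warning above; the random-secret fallback is fine for tests.
  static Configuration withSecretFile(Configuration conf, String secretFilePath) {
    conf.set("hadoop.http.authentication.signature.secret.file", secretFilePath);
    return conf;
  }
}
```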
2024-12-06T03:48:02,214 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:02,215 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_663755632_22 at /127.0.0.1:53074 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39995:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53074 dst: /127.0.0.1:39995 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:48:02,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64395bd2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:48:02,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@256e4c63{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:48:02,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:48:02,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@774f5f60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:48:02,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56cae33a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,STOPPED} 2024-12-06T03:48:02,222 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T03:48:02,222 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:48:02,222 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:48:02,222 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-341572201-172.17.0.2-1733456861200 (Datanode Uuid ec612081-1d55-4bcf-a279-2bf13117f3dd) service to localhost/127.0.0.1:46537 2024-12-06T03:48:02,223 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data3/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:48:02,223 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data4/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:48:02,223 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:48:02,238 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:48:02,244 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:48:02,245 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:48:02,245 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:48:02,246 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:48:02,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@276f61b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:48:02,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c977ad0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:48:02,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4e7425dc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/java.io.tmpdir/jetty-localhost-39923-hadoop-hdfs-3_4_1-tests_jar-_-any-8672577504389562976/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:48:02,351 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@44a4f3f8{HTTP/1.1, (http/1.1)}{localhost:39923} 2024-12-06T03:48:02,351 INFO [Time-limited test {}] server.Server(415): Started @175217ms 2024-12-06T03:48:02,354 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:48:02,558 WARN [Thread-1394 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:48:02,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:02,560 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf32e47149c9fcf12 with lease ID 0x24e415818186dc40: from storage DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59 node DatanodeRegistration(127.0.0.1:42661, datanodeUuid=3bd61f82-a77d-402f-95dc-f24115d0445c, infoPort=38103, infoSecurePort=0, ipcPort=33059, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:48:02,561 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf32e47149c9fcf12 with lease ID 0x24e415818186dc40: from storage DS-5c6d1915-913f-4f4b-b818-2314e0cc3d6e node DatanodeRegistration(127.0.0.1:42661, datanodeUuid=3bd61f82-a77d-402f-95dc-f24115d0445c, infoPort=38103, infoSecurePort=0, ipcPort=33059, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:48:02,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:02,721 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:48:02,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa68bbb8220e227c with lease ID 0x24e415818186dc41: from storage DS-ab410678-cb87-4057-8898-fe30977ecfa4 node DatanodeRegistration(127.0.0.1:41117, datanodeUuid=ec612081-1d55-4bcf-a279-2bf13117f3dd, infoPort=43205, infoSecurePort=0, ipcPort=40207, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:48:02,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa68bbb8220e227c with lease ID 0x24e415818186dc41: from storage DS-ccea4173-28c0-4dde-8c1a-f508b6510ce3 node DatanodeRegistration(127.0.0.1:41117, datanodeUuid=ec612081-1d55-4bcf-a279-2bf13117f3dd, infoPort=43205, infoSecurePort=0, ipcPort=40207, storageInfo=lv=-57;cid=testClusterID;nsid=692526651;c=1733456861200), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:48:03,376 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-06T03:48:03,378 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-06T03:48:03,380 ERROR [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8-prefix:6f1b912b0816,34777,1733456862907 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39995,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:03,380 WARN [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8-prefix:6f1b912b0816,34777,1733456862907 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39995,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:03,380 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C34777%2C1733456862907:(num 1733456877980) roll requested 2024-12-06T03:48:03,380 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C34777%2C1733456862907.1733456883380 2024-12-06T03:48:03,387 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 newFile=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456883380 2024-12-06T03:48:03,388 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:03,388 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:03,388 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:03,388 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:03,388 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:03,388 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456883380 2024-12-06T03:48:03,388 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39995,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:03,388 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39995,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:03,389 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 2024-12-06T03:48:03,389 WARN [IPC Server handler 3 on default port 46537 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-06T03:48:03,389 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 after 0ms 2024-12-06T03:48:03,392 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38103:38103),(127.0.0.1/127.0.0.1:43205:43205)] 2024-12-06T03:48:03,392 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 is not closed yet, will try archiving it next time 2024-12-06T03:48:03,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:03,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:04,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:04,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:05,393 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:05,398 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456883380 newFile=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:05,399 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:05,399 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:05,399 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:05,399 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:05,399 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:05,399 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456883380 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:05,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741838_1019 (size=1264) 2024-12-06T03:48:05,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741838_1019 (size=1264) 2024-12-06T03:48:05,402 DEBUG 
[Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 is not closed yet, will try archiving it next time 2024-12-06T03:48:05,403 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43205:43205),(127.0.0.1/127.0.0.1:38103:38103)] 2024-12-06T03:48:05,403 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 is not closed yet, will try archiving it next time 2024-12-06T03:48:05,403 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 2024-12-06T03:48:05,403 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 2024-12-06T03:48:05,403 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 after 0ms 2024-12-06T03:48:05,403 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 2024-12-06T03:48:05,413 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733456865324/Put/vlen=218/seqid=0] 2024-12-06T03:48:05,413 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733456874620/Put/vlen=1045/seqid=0] 2024-12-06T03:48:05,413 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456863926 2024-12-06T03:48:05,413 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 2024-12-06T03:48:05,413 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 2024-12-06T03:48:05,414 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 after 1ms 2024-12-06T03:48:05,414 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 2024-12-06T03:48:05,417 DEBUG [Time-limited test {}] 
wal.TestLogRolling(412): #5: [row1003/info:/1733456877980/Put/vlen=1045/seqid=0] 2024-12-06T03:48:05,417 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733456879995/Put/vlen=1045/seqid=0] 2024-12-06T03:48:05,417 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 2024-12-06T03:48:05,417 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456883380 2024-12-06T03:48:05,417 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456883380 2024-12-06T03:48:05,418 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456883380 after 1ms 2024-12-06T03:48:05,418 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456883380 2024-12-06T03:48:05,420 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733456883380/Put/vlen=1045/seqid=0] 2024-12-06T03:48:05,421 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:05,421 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:05,421 WARN [IPC Server handler 1 on default port 46537 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-06T03:48:05,421 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 after 0ms 2024-12-06T03:48:05,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:05,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:05,724 WARN [ResponseProcessor for block BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:05,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_997564354_22 at /127.0.0.1:42496 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41117:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42496 dst: /127.0.0.1:41117 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41117 remote=/127.0.0.1:42496]. Total timeout mills is 60000, 59673 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:48:05,724 WARN [DataStreamer for file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 block BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41117,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK], DatanodeInfoWithStorage[127.0.0.1:42661,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41117,DS-ab410678-cb87-4057-8898-fe30977ecfa4,DISK]) is bad. 2024-12-06T03:48:05,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_997564354_22 at /127.0.0.1:60940 [Receiving block BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60940 dst: /127.0.0.1:42661 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T03:48:05,728 WARN [DataStreamer for file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 block BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:48:05,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741839_1022 (size=85) 2024-12-06T03:48:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741839_1022 (size=85) 2024-12-06T03:48:06,561 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T03:48:06,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:06,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:07,390 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456877980 after 4001ms 2024-12-06T03:48:07,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:07,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:08,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:08,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:09,422 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 after 4001ms 2024-12-06T03:48:09,422 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:09,426 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:09,426 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-06T03:48:09,427 ERROR [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8-prefix:6f1b912b0816,34777,1733456862907.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:09,427 WARN [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8-prefix:6f1b912b0816,34777,1733456862907.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:48:09,427 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C34777%2C1733456862907.meta:.meta(num 1733456864324) roll requested 2024-12-06T03:48:09,427 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C34777%2C1733456862907.meta.1733456889427.meta 2024-12-06T03:48:09,432 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,433 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,433 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,433 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,433 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456889427.meta 2024-12-06T03:48:09,433 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:09,433 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:48:09,434 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta 2024-12-06T03:48:09,434 WARN [IPC Server handler 0 on default port 46537 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-12-06T03:48:09,434 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta after 0ms 2024-12-06T03:48:09,434 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43205:43205),(127.0.0.1/127.0.0.1:38103:38103)] 2024-12-06T03:48:09,434 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta is not closed yet, will try archiving it next time 2024-12-06T03:48:09,449 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/info/429baebffede42449a38f0d8ae66ceeb is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df./info:regioninfo/1733456865329/Put/seqid=0 2024-12-06T03:48:09,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741841_1025 (size=7125) 2024-12-06T03:48:09,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741841_1025 (size=7125) 2024-12-06T03:48:09,458 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/info/429baebffede42449a38f0d8ae66ceeb 2024-12-06T03:48:09,478 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/ns/de3359b9a42148dc8f63ad13694e94bf is 43, key is default/ns:d/1733456864508/Put/seqid=0 2024-12-06T03:48:09,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741842_1026 (size=5153) 2024-12-06T03:48:09,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741842_1026 (size=5153) 2024-12-06T03:48:09,483 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/ns/de3359b9a42148dc8f63ad13694e94bf 2024-12-06T03:48:09,502 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/table/cf5269ae180f4bc18a2ad7c2643a419f is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733456865345/Put/seqid=0 2024-12-06T03:48:09,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741843_1027 (size=5438) 2024-12-06T03:48:09,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741843_1027 (size=5438) 2024-12-06T03:48:09,510 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/table/cf5269ae180f4bc18a2ad7c2643a419f 2024-12-06T03:48:09,517 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/info/429baebffede42449a38f0d8ae66ceeb as hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/info/429baebffede42449a38f0d8ae66ceeb 2024-12-06T03:48:09,523 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/info/429baebffede42449a38f0d8ae66ceeb, entries=10, sequenceid=11, filesize=7.0 K 2024-12-06T03:48:09,524 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/ns/de3359b9a42148dc8f63ad13694e94bf as hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/ns/de3359b9a42148dc8f63ad13694e94bf 2024-12-06T03:48:09,529 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/ns/de3359b9a42148dc8f63ad13694e94bf, entries=2, sequenceid=11, filesize=5.0 K 2024-12-06T03:48:09,530 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/.tmp/table/cf5269ae180f4bc18a2ad7c2643a419f as hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/table/cf5269ae180f4bc18a2ad7c2643a419f 2024-12-06T03:48:09,535 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/table/cf5269ae180f4bc18a2ad7c2643a419f, entries=2, sequenceid=11, filesize=5.3 K 2024-12-06T03:48:09,536 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 110ms, sequenceid=11, compaction requested=false 2024-12-06T03:48:09,537 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-06T03:48:09,537 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 5b842cd62a62f90f865e01e7049cb2df 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-06T03:48:09,537 ERROR [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8-prefix:6f1b912b0816,34777,1733456862907 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:09,537 WARN [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8-prefix:6f1b912b0816,34777,1733456862907 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T03:48:09,538 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C34777%2C1733456862907:(num 1733456885393) roll requested 2024-12-06T03:48:09,538 INFO [regionserver/6f1b912b0816:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C34777%2C1733456862907.1733456889538 2024-12-06T03:48:09,543 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 newFile=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456889538 2024-12-06T03:48:09,544 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,544 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,544 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,544 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,544 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,544 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456889538 2024-12-06T03:48:09,544 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:09,545 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-341572201-172.17.0.2-1733456861200:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:09,545 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:09,545 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 after 0ms 2024-12-06T03:48:09,547 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.1733456885393 to hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/oldWALs/6f1b912b0816%2C34777%2C1733456862907.1733456885393 2024-12-06T03:48:09,547 DEBUG [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38103:38103),(127.0.0.1/127.0.0.1:43205:43205)] 2024-12-06T03:48:09,559 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df/.tmp/info/43e5d9d0110847f690f225f966790ad0 is 1080, key is row1002/info:/1733456874620/Put/seqid=0 2024-12-06T03:48:09,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:09,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741845_1029 (size=9270) 2024-12-06T03:48:09,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741845_1029 (size=9270) 2024-12-06T03:48:09,564 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df/.tmp/info/43e5d9d0110847f690f225f966790ad0 2024-12-06T03:48:09,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:09,571 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df/.tmp/info/43e5d9d0110847f690f225f966790ad0 as hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df/info/43e5d9d0110847f690f225f966790ad0 2024-12-06T03:48:09,577 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df/info/43e5d9d0110847f690f225f966790ad0, entries=4, sequenceid=8, filesize=9.1 K 2024-12-06T03:48:09,578 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 5b842cd62a62f90f865e01e7049cb2df in 41ms, sequenceid=8, compaction requested=false 2024-12-06T03:48:09,578 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 5b842cd62a62f90f865e01e7049cb2df: 2024-12-06T03:48:09,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T03:48:09,583 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
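The RecoverLeaseFSUtils warnings earlier in this stretch come from the WAL close path asking the NameNode to recover a lease and then polling isFileClosed on a DFSClient that has already been shut down, which is why every probe ends in "Filesystem closed". As a rough sketch of that recover-then-poll pattern, using only the public DistributedFileSystem calls that appear in the stack traces (this is an illustration, not the HBase utility itself; the path, timeout, and sleep interval are invented for the example):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Ask the NameNode to start lease recovery, then poll until the file is closed
  // or the deadline passes. This mirrors the recover-then-isFileClosed pattern in
  // the RecoverLeaseFSUtils frames above, not its actual implementation.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);   // true if the file is already closed
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                       // the log shows roughly 1s between attempts
      // isFileClosed is the probe that fails with "Filesystem closed" in the traces
      // above once the underlying DFSClient has been shut down.
      recovered = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WAL path, shaped like the ones in the log.
    Path wal = new Path("hdfs://localhost:46537/user/jenkins/test-data/example/wal.1733456885393");
    try (FileSystem fs = wal.getFileSystem(conf)) {
      System.out.println("recovered=" + recoverLease((DistributedFileSystem) fs, wal, 60_000L));
    }
  }
}

In the healthy case above the loop exits immediately ("Recovered lease, attempt=0 ... after 0ms"); for the files on port 46387 the client is closed, so the probe itself throws and the retries repeat about once per second for the rest of the log.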
2024-12-06T03:48:09,583 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:48:09,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:48:09,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:48:09,583 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
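The call stack just above is the ordinary JUnit tear-down path: an @After method hands control to HBaseTestingUtil, which closes the shared async connection and then stops the mini HBase and DFS clusters. A stripped-down lifecycle sketch in that style (the class is hypothetical, and the start call is an assumed counterpart to the shutdownMiniCluster frame shown in the stack) might look like:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // Hypothetical test scaffold; the real AbstractTestLogRolling does more setup.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed counterpart to the shutdown below: starts DFS, ZooKeeper,
    // the master and a region server in one JVM.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // This is the shutdownMiniCluster() frame visible in the call stack above;
    // it closes the shared connection first, then stops the mini-cluster daemons.
    testUtil.shutdownMiniCluster();
  }
}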
2024-12-06T03:48:09,584 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T03:48:09,584 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=681900402, stopped=false 2024-12-06T03:48:09,584 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6f1b912b0816,44283,1733456862758 2024-12-06T03:48:09,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:48:09,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:48:09,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:09,649 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:48:09,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:09,650 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T03:48:09,650 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:48:09,650 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:48:09,650 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:48:09,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:48:09,650 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6f1b912b0816,34777,1733456862907' ***** 2024-12-06T03:48:09,650 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T03:48:09,650 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T03:48:09,650 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(3091): Received CLOSE for 5b842cd62a62f90f865e01e7049cb2df 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(959): stopping server 6f1b912b0816,34777,1733456862907 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6f1b912b0816:34777. 
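A few entries up, both the master and the region server receive a NodeDeleted event for /hbase/running and immediately re-arm the watch; deleting that znode is how cluster shutdown is announced to every process. Reduced to the stock ZooKeeper client API (quorum address copied from the log, everything else invented), the watch-and-react pattern looks roughly like this:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch shutdownSeen = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      // Deleting /hbase/running is the cluster-wide stop signal that shows up
      // as the NodeDeleted events in the ZKWatcher lines above.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        shutdownSeen.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61821", 30_000, watcher);
    try {
      zk.exists("/hbase/running", true);  // arm a one-shot watch on the znode
      shutdownSeen.await();               // block until shutdown is announced
      System.out.println("cluster shutdown requested");
    } finally {
      zk.close();
    }
  }
}

ZooKeeper watches are one-shot, which is why the ZKUtil lines above re-set the watcher on the now-missing znode right after the event fires.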
2024-12-06T03:48:09,651 DEBUG [RS:0;6f1b912b0816:34777 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:48:09,651 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5b842cd62a62f90f865e01e7049cb2df, disabling compactions & flushes 2024-12-06T03:48:09,651 DEBUG [RS:0;6f1b912b0816:34777 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:48:09,651 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df. 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T03:48:09,651 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df. 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T03:48:09,651 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df. after waiting 0 ms 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T03:48:09,651 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df. 
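Each "Connection has been closed by ..." line plus its call stack marks a shared AsyncConnection being closed during shutdown, first by the test and then by the region server itself. For reference, the normal client-side lifecycle of such a connection, sketched with the public ConnectionFactory API (configuration details omitted), is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncConnectionCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() waits for it to complete.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // ... use conn.getTable(...) / conn.getAdmin() here ...
    }
    // Leaving the try block calls close(), which is what emits the
    // "Connection has been closed by ..." lines and call stacks seen above.
  }
}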
2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T03:48:09,651 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-06T03:48:09,651 DEBUG [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5b842cd62a62f90f865e01e7049cb2df=TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df.} 2024-12-06T03:48:09,651 DEBUG [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5b842cd62a62f90f865e01e7049cb2df 2024-12-06T03:48:09,651 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:48:09,652 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:48:09,652 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:48:09,652 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:48:09,652 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:48:09,655 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/default/TestLogRolling-testLogRollOnPipelineRestart/5b842cd62a62f90f865e01e7049cb2df/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-06T03:48:09,655 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-06T03:48:09,655 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df. 2024-12-06T03:48:09,655 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5b842cd62a62f90f865e01e7049cb2df: Waiting for close lock at 1733456889651Running coprocessor pre-close hooks at 1733456889651Disabling compacts and flushes for region at 1733456889651Disabling writes for close at 1733456889651Writing region close event to WAL at 1733456889652 (+1 ms)Running coprocessor post-close hooks at 1733456889655 (+3 ms)Closed at 1733456889655 2024-12-06T03:48:09,655 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733456864546.5b842cd62a62f90f865e01e7049cb2df. 
2024-12-06T03:48:09,655 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:48:09,656 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:48:09,656 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456889651Running coprocessor pre-close hooks at 1733456889651Disabling compacts and flushes for region at 1733456889651Disabling writes for close at 1733456889652 (+1 ms)Writing region close event to WAL at 1733456889653 (+1 ms)Running coprocessor post-close hooks at 1733456889655 (+2 ms)Closed at 1733456889656 (+1 ms) 2024-12-06T03:48:09,656 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T03:48:09,795 INFO [regionserver/6f1b912b0816:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:48:09,798 INFO [regionserver/6f1b912b0816:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T03:48:09,798 INFO [regionserver/6f1b912b0816:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T03:48:09,852 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(976): stopping server 6f1b912b0816,34777,1733456862907; all regions closed. 2024-12-06T03:48:09,852 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,852 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,852 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,853 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,853 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:09,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741840_1023 (size=825) 2024-12-06T03:48:09,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741840_1023 (size=825) 2024-12-06T03:48:10,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:10,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:11,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:11,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:12,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:12,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:12,725 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T03:48:12,737 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T03:48:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:48:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:48:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T03:48:13,435 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta after 4001ms 2024-12-06T03:48:13,435 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/WALs/6f1b912b0816,34777,1733456862907/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta to hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/oldWALs/6f1b912b0816%2C34777%2C1733456862907.meta.1733456864324.meta 2024-12-06T03:48:13,438 DEBUG [RS:0;6f1b912b0816:34777 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/oldWALs 2024-12-06T03:48:13,438 INFO [RS:0;6f1b912b0816:34777 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C34777%2C1733456862907.meta:.meta(num 1733456889427) 2024-12-06T03:48:13,438 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,438 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,438 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,439 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,439 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741844_1028 (size=1162) 2024-12-06T03:48:13,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741844_1028 (size=1162) 2024-12-06T03:48:13,444 DEBUG [RS:0;6f1b912b0816:34777 {}] wal.AbstractFSWAL(1256): Moved 4 WAL 
file(s) to /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/oldWALs 2024-12-06T03:48:13,444 INFO [RS:0;6f1b912b0816:34777 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C34777%2C1733456862907:(num 1733456889538) 2024-12-06T03:48:13,444 DEBUG [RS:0;6f1b912b0816:34777 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:48:13,444 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:48:13,444 INFO [RS:0;6f1b912b0816:34777 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:48:13,445 INFO [RS:0;6f1b912b0816:34777 {}] hbase.ChoreService(370): Chore service for: regionserver/6f1b912b0816:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T03:48:13,445 INFO [RS:0;6f1b912b0816:34777 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:48:13,445 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:48:13,445 INFO [RS:0;6f1b912b0816:34777 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34777 2024-12-06T03:48:13,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:48:13,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6f1b912b0816,34777,1733456862907 2024-12-06T03:48:13,487 INFO [RS:0;6f1b912b0816:34777 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:48:13,495 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6f1b912b0816,34777,1733456862907] 2024-12-06T03:48:13,503 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6f1b912b0816,34777,1733456862907 already deleted, retry=false 2024-12-06T03:48:13,503 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6f1b912b0816,34777,1733456862907 expired; onlineServers=0 2024-12-06T03:48:13,503 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6f1b912b0816,44283,1733456862758' ***** 2024-12-06T03:48:13,503 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T03:48:13,503 INFO [M:0;6f1b912b0816:44283 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:48:13,503 INFO [M:0;6f1b912b0816:44283 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:48:13,503 DEBUG [M:0;6f1b912b0816:44283 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T03:48:13,504 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
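The "RegionServer ephemeral node deleted" handling above works because each region server registers itself as an ephemeral znode under /hbase/rs; once its ZooKeeper session ends, the node disappears and the master's RegionServerTracker treats that as expiration. A bare-bones illustration of ephemeral registration with the plain ZooKeeper client (the znode name and server identity are made up, and the parent path is assumed to exist):

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61821", 30_000, event -> { });
    // Parent znodes (/hbase, /hbase/rs) are assumed to exist already.
    // Ephemeral znodes live only as long as the creating session, so closing the
    // session (or the process dying) removes the node; that removal is what the
    // master's RegionServerTracker reacts to in the entries above.
    String path = zk.create("/hbase/rs/example-host,12345,0",
        new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    System.out.println("registered " + path);
    zk.close();  // session ends -> znode deleted -> watchers see NodeDeleted
  }
}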
2024-12-06T03:48:13,504 DEBUG [M:0;6f1b912b0816:44283 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T03:48:13,504 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456863658 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456863658,5,FailOnTimeoutGroup] 2024-12-06T03:48:13,504 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456863659 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456863659,5,FailOnTimeoutGroup] 2024-12-06T03:48:13,504 INFO [M:0;6f1b912b0816:44283 {}] hbase.ChoreService(370): Chore service for: master/6f1b912b0816:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T03:48:13,504 INFO [M:0;6f1b912b0816:44283 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:48:13,504 DEBUG [M:0;6f1b912b0816:44283 {}] master.HMaster(1795): Stopping service threads 2024-12-06T03:48:13,504 INFO [M:0;6f1b912b0816:44283 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T03:48:13,504 INFO [M:0;6f1b912b0816:44283 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:48:13,504 INFO [M:0;6f1b912b0816:44283 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T03:48:13,504 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T03:48:13,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T03:48:13,512 DEBUG [M:0;6f1b912b0816:44283 {}] zookeeper.ZKUtil(347): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T03:48:13,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:13,512 WARN [M:0;6f1b912b0816:44283 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T03:48:13,512 INFO [M:0;6f1b912b0816:44283 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/.lastflushedseqids 2024-12-06T03:48:13,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741846_1030 (size=111) 2024-12-06T03:48:13,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741846_1030 (size=111) 2024-12-06T03:48:13,518 INFO [M:0;6f1b912b0816:44283 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T03:48:13,518 INFO [M:0;6f1b912b0816:44283 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T03:48:13,518 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:48:13,518 INFO [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:48:13,518 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:48:13,518 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:48:13,518 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:48:13,518 INFO [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-06T03:48:13,519 ERROR [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData-prefix:6f1b912b0816,44283,1733456862758 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:13,519 WARN [FSHLog-0-hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData-prefix:6f1b912b0816,44283,1733456862758 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
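The "All datanodes ... are bad" failure in appendAndSync is what drives the roll requested on the next line: when an append or sync on the current writer fails, the WAL gives up on that output stream, opens a new file on a fresh pipeline, and leaves the old one to lease recovery. The sketch below shows only the generic roll-on-error shape using the Hadoop FileSystem API; it is not FSHLog, and the file naming and error handling are simplified assumptions:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RollOnErrorWriterSketch {
  private final FileSystem fs;
  private final Path dir;
  private FSDataOutputStream out;
  private int seq;

  RollOnErrorWriterSketch(FileSystem fs, Path dir) throws IOException {
    this.fs = fs;
    this.dir = dir;
    roll();
  }

  // Open a fresh log file; called at startup and whenever an append or sync fails.
  private void roll() throws IOException {
    if (out != null) {
      try {
        out.close();
      } catch (IOException e) {
        // The abandoned writer may be unrecoverable, as in the
        // "close old writer failed" warning that follows in the log.
      }
    }
    out = fs.create(new Path(dir, "wal." + seq++));
  }

  void append(byte[] record) throws IOException {
    try {
      out.write(record);
      out.hflush();  // durability point, analogous to FSHLog's sync
    } catch (IOException e) {
      // Append/sync failed (e.g. "All datanodes ... are bad"): switch to a new
      // file on a fresh pipeline and let the caller decide how to retry.
      roll();
      throw e;
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path dir = new Path("hdfs://localhost:46537/user/jenkins/test-data/example-wals");
    RollOnErrorWriterSketch w = new RollOnErrorWriterSketch(dir.getFileSystem(conf), dir);
    w.append(new byte[] { 1, 2, 3 });
  }
}

Closing the abandoned writer can itself fail and fall back to lease recovery, which is exactly the sequence of warnings that follows below.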
2024-12-06T03:48:13,519 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 6f1b912b0816%2C44283%2C1733456862758:(num 1733456863437) roll requested 2024-12-06T03:48:13,519 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C44283%2C1733456862758.1733456893519 2024-12-06T03:48:13,524 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,524 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,524 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,525 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,525 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456893519 2024-12-06T03:48:13,525 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:13,525 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44745,DS-560d8678-e96e-448e-9b2f-0cb3d6e56b59,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T03:48:13,525 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 2024-12-06T03:48:13,526 WARN [IPC Server handler 1 on default port 46537 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-06T03:48:13,526 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 after 1ms 2024-12-06T03:48:13,529 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43205:43205),(127.0.0.1/127.0.0.1:38103:38103)] 2024-12-06T03:48:13,529 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 is not closed yet, will try archiving it next time 2024-12-06T03:48:13,543 DEBUG [M:0;6f1b912b0816:44283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/99263f3f5091432bbb7bf87e7349b624 is 82, key is hbase:meta,,1/info:regioninfo/1733456864361/Put/seqid=0 2024-12-06T03:48:13,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741848_1033 (size=5672) 2024-12-06T03:48:13,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741848_1033 (size=5672) 2024-12-06T03:48:13,548 INFO [M:0;6f1b912b0816:44283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/99263f3f5091432bbb7bf87e7349b624 2024-12-06T03:48:13,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:13,568 DEBUG [M:0;6f1b912b0816:44283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63a80022e4794206902225c3b32f4985 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733456865353/Put/seqid=0 2024-12-06T03:48:13,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:13,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741849_1034 (size=6118) 2024-12-06T03:48:13,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741849_1034 (size=6118) 2024-12-06T03:48:13,573 INFO [M:0;6f1b912b0816:44283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63a80022e4794206902225c3b32f4985 2024-12-06T03:48:13,590 DEBUG [M:0;6f1b912b0816:44283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9369ea7220484fc0818634d6900dd275 is 69, key is 6f1b912b0816,34777,1733456862907/rs:state/1733456863775/Put/seqid=0 2024-12-06T03:48:13,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741850_1035 (size=5156) 2024-12-06T03:48:13,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741850_1035 (size=5156) 2024-12-06T03:48:13,595 INFO [M:0;6f1b912b0816:44283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9369ea7220484fc0818634d6900dd275 2024-12-06T03:48:13,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:48:13,595 INFO [RS:0;6f1b912b0816:34777 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:48:13,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34777-0x101aa0a30d90001, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:48:13,595 INFO [RS:0;6f1b912b0816:34777 {}] regionserver.HRegionServer(1031): Exiting; stopping=6f1b912b0816,34777,1733456862907; zookeeper connection closed. 
2024-12-06T03:48:13,596 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@416580c4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@416580c4 2024-12-06T03:48:13,596 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T03:48:13,619 DEBUG [M:0;6f1b912b0816:44283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/786497c8380a488882a652d2ef109a17 is 52, key is load_balancer_on/state:d/1733456864541/Put/seqid=0 2024-12-06T03:48:13,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741851_1036 (size=5056) 2024-12-06T03:48:13,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741851_1036 (size=5056) 2024-12-06T03:48:13,625 INFO [M:0;6f1b912b0816:44283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/786497c8380a488882a652d2ef109a17 2024-12-06T03:48:13,630 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/99263f3f5091432bbb7bf87e7349b624 as hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/99263f3f5091432bbb7bf87e7349b624 2024-12-06T03:48:13,635 INFO [M:0;6f1b912b0816:44283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/99263f3f5091432bbb7bf87e7349b624, entries=8, sequenceid=56, filesize=5.5 K 2024-12-06T03:48:13,637 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63a80022e4794206902225c3b32f4985 as hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/63a80022e4794206902225c3b32f4985 2024-12-06T03:48:13,643 INFO [M:0;6f1b912b0816:44283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/63a80022e4794206902225c3b32f4985, entries=6, sequenceid=56, filesize=6.0 K 2024-12-06T03:48:13,644 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9369ea7220484fc0818634d6900dd275 as hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9369ea7220484fc0818634d6900dd275 
2024-12-06T03:48:13,650 INFO [M:0;6f1b912b0816:44283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9369ea7220484fc0818634d6900dd275, entries=1, sequenceid=56, filesize=5.0 K 2024-12-06T03:48:13,651 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/786497c8380a488882a652d2ef109a17 as hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/786497c8380a488882a652d2ef109a17 2024-12-06T03:48:13,657 INFO [M:0;6f1b912b0816:44283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/786497c8380a488882a652d2ef109a17, entries=1, sequenceid=56, filesize=4.9 K 2024-12-06T03:48:13,658 INFO [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=56, compaction requested=false 2024-12-06T03:48:13,663 INFO [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:48:13,663 DEBUG [M:0;6f1b912b0816:44283 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456893518Disabling compacts and flushes for region at 1733456893518Disabling writes for close at 1733456893518Obtaining lock to block concurrent updates at 1733456893518Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733456893518Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733456893519 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733456893530 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733456893530Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733456893542 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733456893542Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733456893554 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733456893567 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733456893567Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733456893578 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733456893590 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733456893590Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733456893600 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733456893619 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733456893619Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46679e69: reopening flushed file at 1733456893629 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a82ccd0: reopening flushed file at 1733456893636 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b71013e: reopening flushed file at 1733456893643 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3270361e: reopening flushed file at 1733456893650 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=56, compaction requested=false at 1733456893658 (+8 ms)Writing region close event to WAL at 1733456893662 (+4 ms)Closed at 1733456893662 2024-12-06T03:48:13,663 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,663 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,663 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,663 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,663 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:13,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741847_1031 (size=757) 2024-12-06T03:48:13,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741847_1031 (size=757) 2024-12-06T03:48:14,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:14,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:14,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:14,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,189 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T03:48:15,190 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,190 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:15,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:15,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:15,726 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T03:48:16,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:16,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:17,527 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 after 4002ms 2024-12-06T03:48:17,527 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/WALs/6f1b912b0816,44283,1733456862758/6f1b912b0816%2C44283%2C1733456862758.1733456863437 to hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/oldWALs/6f1b912b0816%2C44283%2C1733456862758.1733456863437 2024-12-06T03:48:17,530 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/MasterData/oldWALs/6f1b912b0816%2C44283%2C1733456862758.1733456863437 to hdfs://localhost:46537/user/jenkins/test-data/dcb9bc50-d530-0bf5-f77a-b2585f5d70a8/oldWALs/6f1b912b0816%2C44283%2C1733456862758.1733456863437$masterlocalwal$ 2024-12-06T03:48:17,530 INFO [M:0;6f1b912b0816:44283 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-06T03:48:17,530 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:48:17,530 INFO [M:0;6f1b912b0816:44283 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44283 2024-12-06T03:48:17,530 INFO [M:0;6f1b912b0816:44283 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:48:17,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:17,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:17,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:48:17,682 INFO [M:0;6f1b912b0816:44283 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:48:17,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44283-0x101aa0a30d90000, quorum=127.0.0.1:61821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:48:17,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4e7425dc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:48:17,685 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@44a4f3f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:48:17,685 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:48:17,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c977ad0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:48:17,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@276f61b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,STOPPED} 2024-12-06T03:48:17,686 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:48:17,686 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-341572201-172.17.0.2-1733456861200 (Datanode Uuid ec612081-1d55-4bcf-a279-2bf13117f3dd) service to localhost/127.0.0.1:46537 2024-12-06T03:48:17,686 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:48:17,686 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:48:17,687 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data3/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:48:17,687 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data4/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:48:17,687 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:48:17,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@31767992{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:48:17,689 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2859ae00{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:48:17,689 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:48:17,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@138a9f82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:48:17,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@312832b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,STOPPED} 2024-12-06T03:48:17,690 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:48:17,690 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:48:17,690 WARN [BP-341572201-172.17.0.2-1733456861200 heartbeating to localhost/127.0.0.1:46537 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-341572201-172.17.0.2-1733456861200 (Datanode Uuid 3bd61f82-a77d-402f-95dc-f24115d0445c) service to localhost/127.0.0.1:46537 2024-12-06T03:48:17,690 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:48:17,690 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data1/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:48:17,691 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/cluster_4a7ca053-6cef-bc7a-6801-30290ea9b170/data/data2/current/BP-341572201-172.17.0.2-1733456861200 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:48:17,691 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:48:17,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@633966fa{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:48:17,695 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@603c75b8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:48:17,695 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:48:17,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67b3f925{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:48:17,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15d59233{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir/,STOPPED} 2024-12-06T03:48:17,701 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T03:48:17,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T03:48:17,740 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46537 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46537 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46537 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46537 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46537 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46537 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46537 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46537 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=459 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=228 (was 216) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7508 (was 7790)
2024-12-06T03:48:17,746 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=228, ProcessCount=11, AvailableMemoryMB=7507
2024-12-06T03:48:17,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-06T03:48:17,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.log.dir so I do NOT create it in target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2
2024-12-06T03:48:17,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6dae5e38-a634-745f-a22e-ae1d0ef7ec41/hadoop.tmp.dir so I do NOT create it in target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2
2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4, deleteOnExit=true
2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/test.cache.data in system properties and HBase conf
2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.tmp.dir in system properties and HBase conf
2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.log.dir in system properties and HBase conf
2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-06T03:48:17,747 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem.
Skipping on block location reordering 2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T03:48:17,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/nfs.dump.dir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/java.io.tmpdir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T03:48:17,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T03:48:17,758 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:48:18,057 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:48:18,063 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:48:18,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:48:18,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:48:18,071 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:48:18,072 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:48:18,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c49ee69{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:48:18,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4179ed70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:48:18,165 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77bac3c5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/java.io.tmpdir/jetty-localhost-33253-hadoop-hdfs-3_4_1-tests_jar-_-any-14080165881382947933/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:48:18,166 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7eb75a65{HTTP/1.1, (http/1.1)}{localhost:33253} 2024-12-06T03:48:18,166 INFO [Time-limited test {}] server.Server(415): Started @191031ms 2024-12-06T03:48:18,176 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:48:18,365 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:48:18,368 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:48:18,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:48:18,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:48:18,369 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:48:18,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67e36879{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:48:18,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57426144{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:48:18,459 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25d1fa6d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/java.io.tmpdir/jetty-localhost-33567-hadoop-hdfs-3_4_1-tests_jar-_-any-16186124693400543444/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:48:18,459 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54c0aeb2{HTTP/1.1, (http/1.1)}{localhost:33567} 2024-12-06T03:48:18,459 INFO [Time-limited test {}] server.Server(415): Started @191324ms 2024-12-06T03:48:18,460 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:48:18,488 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:48:18,491 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:48:18,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:48:18,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:48:18,492 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:48:18,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6aa66066{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:48:18,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@713ba638{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:48:18,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    ... 11 more
2024-12-06T03:48:18,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-06T03:48:18,583 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f798335{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/java.io.tmpdir/jetty-localhost-35117-hadoop-hdfs-3_4_1-tests_jar-_-any-14910257532760808505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-06T03:48:18,584 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@183e22a2{HTTP/1.1, (http/1.1)}{localhost:35117}
2024-12-06T03:48:18,584 INFO [Time-limited test {}] server.Server(415): Started @191449ms
2024-12-06T03:48:18,585 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
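A note on the two "Failed invocation ... InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" warnings above: the stack frames show that RecoverLeaseFSUtils.isFileClosed reaches DistributedFileSystem.isFileClosed through java.lang.reflect.Method.invoke, so the underlying error (the already-closed DFS client reporting "Filesystem closed") only surfaces as the cause of an InvocationTargetException whose own message is null. The small, self-contained sketch below reproduces just that wrapping behaviour; the class, method and path names are hypothetical stand-ins, not HBase code.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveCauseSketch {
        // Hypothetical stand-in for the reflectively invoked isFileClosed call.
        public static boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }

        public static void main(String[] args) throws Exception {
            Method m = ReflectiveCauseSketch.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(null, "/some/wal/file"); // static method, so the receiver is null
            } catch (InvocationTargetException e) {
                // The reflective wrapper carries no message of its own ("null" in the log) ...
                System.out.println(String.valueOf(e.getMessage()));
                // ... the real failure is attached as the cause, as in the WARN entries above.
                System.out.println(e.getCause());
            }
        }
    }

The same pair of warnings recurs later in this log (at 03:48:19,569 and 03:48:19,574) for the same two WAL files, which appears to be the same wrapping at work on subsequent lease-recovery attempts.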
2024-12-06T03:48:19,121 WARN [Thread-1634 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/data/data1/current/BP-77245198-172.17.0.2-1733456897772/current, will proceed with Du for space computation calculation, 2024-12-06T03:48:19,121 WARN [Thread-1635 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/data/data2/current/BP-77245198-172.17.0.2-1733456897772/current, will proceed with Du for space computation calculation, 2024-12-06T03:48:19,139 WARN [Thread-1598 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:48:19,141 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ad4357f7d285506 with lease ID 0x6a1087ca61edda1f: Processing first storage report for DS-5dc335cf-1724-496a-ac11-59fdb26f2c62 from datanode DatanodeRegistration(127.0.0.1:42909, datanodeUuid=516377b3-393b-4516-884e-64259e43aeb5, infoPort=46359, infoSecurePort=0, ipcPort=45161, storageInfo=lv=-57;cid=testClusterID;nsid=1781941656;c=1733456897772) 2024-12-06T03:48:19,141 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ad4357f7d285506 with lease ID 0x6a1087ca61edda1f: from storage DS-5dc335cf-1724-496a-ac11-59fdb26f2c62 node DatanodeRegistration(127.0.0.1:42909, datanodeUuid=516377b3-393b-4516-884e-64259e43aeb5, infoPort=46359, infoSecurePort=0, ipcPort=45161, storageInfo=lv=-57;cid=testClusterID;nsid=1781941656;c=1733456897772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:48:19,141 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ad4357f7d285506 with lease ID 0x6a1087ca61edda1f: Processing first storage report for DS-e284bfdc-1fbc-4d0a-a928-b1a80b7c0fc9 from datanode DatanodeRegistration(127.0.0.1:42909, datanodeUuid=516377b3-393b-4516-884e-64259e43aeb5, infoPort=46359, infoSecurePort=0, ipcPort=45161, storageInfo=lv=-57;cid=testClusterID;nsid=1781941656;c=1733456897772) 2024-12-06T03:48:19,141 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ad4357f7d285506 with lease ID 0x6a1087ca61edda1f: from storage DS-e284bfdc-1fbc-4d0a-a928-b1a80b7c0fc9 node DatanodeRegistration(127.0.0.1:42909, datanodeUuid=516377b3-393b-4516-884e-64259e43aeb5, infoPort=46359, infoSecurePort=0, ipcPort=45161, storageInfo=lv=-57;cid=testClusterID;nsid=1781941656;c=1733456897772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:48:19,297 WARN [Thread-1645 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/data/data3/current/BP-77245198-172.17.0.2-1733456897772/current, will proceed with Du for space computation calculation, 2024-12-06T03:48:19,297 WARN [Thread-1646 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/data/data4/current/BP-77245198-172.17.0.2-1733456897772/current, will proceed with Du for space computation calculation, 2024-12-06T03:48:19,315 WARN [Thread-1621 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:48:19,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa08ee4093b1859d3 with lease ID 0x6a1087ca61edda20: Processing first storage report for DS-c5f8be3c-dab1-4c5d-9b76-a82dc2437ca9 from datanode DatanodeRegistration(127.0.0.1:45711, datanodeUuid=df0ec9d8-be90-4a6f-a932-8235874c1144, infoPort=41803, infoSecurePort=0, ipcPort=43427, storageInfo=lv=-57;cid=testClusterID;nsid=1781941656;c=1733456897772) 2024-12-06T03:48:19,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa08ee4093b1859d3 with lease ID 0x6a1087ca61edda20: from storage DS-c5f8be3c-dab1-4c5d-9b76-a82dc2437ca9 node DatanodeRegistration(127.0.0.1:45711, datanodeUuid=df0ec9d8-be90-4a6f-a932-8235874c1144, infoPort=41803, infoSecurePort=0, ipcPort=43427, storageInfo=lv=-57;cid=testClusterID;nsid=1781941656;c=1733456897772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:48:19,318 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa08ee4093b1859d3 with lease ID 0x6a1087ca61edda20: Processing first storage report for DS-818b000a-5385-4199-8e1a-ff9c8e6ed64d from datanode DatanodeRegistration(127.0.0.1:45711, datanodeUuid=df0ec9d8-be90-4a6f-a932-8235874c1144, infoPort=41803, infoSecurePort=0, ipcPort=43427, storageInfo=lv=-57;cid=testClusterID;nsid=1781941656;c=1733456897772) 2024-12-06T03:48:19,318 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa08ee4093b1859d3 with lease ID 0x6a1087ca61edda20: from storage DS-818b000a-5385-4199-8e1a-ff9c8e6ed64d node DatanodeRegistration(127.0.0.1:45711, datanodeUuid=df0ec9d8-be90-4a6f-a932-8235874c1144, infoPort=41803, infoSecurePort=0, ipcPort=43427, storageInfo=lv=-57;cid=testClusterID;nsid=1781941656;c=1733456897772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:48:19,412 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2 2024-12-06T03:48:19,414 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/zookeeper_0, clientPort=51923, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T03:48:19,415 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51923 2024-12-06T03:48:19,415 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:48:19,416 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:48:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:48:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:48:19,427 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6 with version=8 2024-12-06T03:48:19,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase-staging 2024-12-06T03:48:19,429 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:48:19,429 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:48:19,429 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:48:19,429 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:48:19,429 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:48:19,429 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:48:19,429 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T03:48:19,430 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:48:19,430 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33993 2024-12-06T03:48:19,431 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33993 connecting to ZooKeeper ensemble=127.0.0.1:51923 2024-12-06T03:48:19,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:339930x0, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-06T03:48:19,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33993-0x101aa0ac01a0000 connected 2024-12-06T03:48:19,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:48:19,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:48:19,555 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:48:19,555 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6, hbase.cluster.distributed=false 2024-12-06T03:48:19,557 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:48:19,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33993 2024-12-06T03:48:19,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33993 2024-12-06T03:48:19,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33993 2024-12-06T03:48:19,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33993 2024-12-06T03:48:19,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33993 2024-12-06T03:48:19,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:19,572 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:48:19,572 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:48:19,572 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:48:19,572 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:48:19,573 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:48:19,573 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:48:19,573 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T03:48:19,573 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:48:19,573 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39039 2024-12-06T03:48:19,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:19,575 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39039 connecting to ZooKeeper ensemble=127.0.0.1:51923 2024-12-06T03:48:19,576 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:48:19,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:48:19,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390390x0, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:48:19,590 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390390x0, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:48:19,590 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39039-0x101aa0ac01a0001 connected 2024-12-06T03:48:19,590 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T03:48:19,591 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T03:48:19,591 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T03:48:19,592 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase 
Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:48:19,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39039 2024-12-06T03:48:19,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39039 2024-12-06T03:48:19,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39039 2024-12-06T03:48:19,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39039 2024-12-06T03:48:19,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39039 2024-12-06T03:48:19,602 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6f1b912b0816:33993 2024-12-06T03:48:19,603 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6f1b912b0816,33993,1733456899429 2024-12-06T03:48:19,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:48:19,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:48:19,610 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6f1b912b0816,33993,1733456899429 2024-12-06T03:48:19,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T03:48:19,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,619 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T03:48:19,619 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6f1b912b0816,33993,1733456899429 from backup master directory 2024-12-06T03:48:19,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:48:19,627 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6f1b912b0816,33993,1733456899429 2024-12-06T03:48:19,627 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:48:19,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:48:19,627 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6f1b912b0816,33993,1733456899429 2024-12-06T03:48:19,631 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/hbase.id] with ID: acac490f-8e55-47e5-a17b-0d74a245b505 2024-12-06T03:48:19,631 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/.tmp/hbase.id 2024-12-06T03:48:19,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:48:19,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:48:19,639 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/.tmp/hbase.id]:[hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/hbase.id] 2024-12-06T03:48:19,650 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:48:19,650 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T03:48:19,651 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-06T03:48:19,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:48:19,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:48:19,666 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:48:19,667 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T03:48:19,668 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:48:19,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:48:19,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:48:19,675 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store 2024-12-06T03:48:19,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:48:19,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:48:19,682 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:48:19,682 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:48:19,682 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:48:19,682 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:48:19,682 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:48:19,682 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:48:19,682 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T03:48:19,682 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456899682Disabling compacts and flushes for region at 1733456899682Disabling writes for close at 1733456899682Writing region close event to WAL at 1733456899682Closed at 1733456899682 2024-12-06T03:48:19,683 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/.initializing 2024-12-06T03:48:19,683 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/WALs/6f1b912b0816,33993,1733456899429 2024-12-06T03:48:19,685 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C33993%2C1733456899429, suffix=, logDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/WALs/6f1b912b0816,33993,1733456899429, archiveDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/oldWALs, maxLogs=10 2024-12-06T03:48:19,685 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33993%2C1733456899429.1733456899685 2024-12-06T03:48:19,690 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/WALs/6f1b912b0816,33993,1733456899429/6f1b912b0816%2C33993%2C1733456899429.1733456899685 2024-12-06T03:48:19,690 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46359:46359),(127.0.0.1/127.0.0.1:41803:41803)] 2024-12-06T03:48:19,694 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:48:19,694 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:48:19,694 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,695 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T03:48:19,700 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:19,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T03:48:19,701 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:48:19,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T03:48:19,702 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:48:19,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T03:48:19,704 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:48:19,704 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,705 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,705 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,706 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,706 DEBUG [master/6f1b912b0816:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,707 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T03:48:19,707 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:48:19,710 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:48:19,711 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711268, jitterRate=-0.0955767035484314}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T03:48:19,711 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733456899695Initializing all the Stores at 1733456899695Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456899695Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456899698 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456899698Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456899698Cleaning up temporary data from old regions at 1733456899706 (+8 ms)Region opened successfully at 1733456899711 (+5 ms) 2024-12-06T03:48:19,713 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T03:48:19,716 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2031472e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:48:19,716 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T03:48:19,717 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T03:48:19,717 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T03:48:19,717 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T03:48:19,717 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T03:48:19,717 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-06T03:48:19,717 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T03:48:19,721 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T03:48:19,722 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T03:48:19,739 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T03:48:19,740 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T03:48:19,740 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T03:48:19,748 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T03:48:19,748 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T03:48:19,749 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T03:48:19,760 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T03:48:19,761 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T03:48:19,768 DEBUG 
[master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T03:48:19,770 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T03:48:19,781 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T03:48:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:48:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:48:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,790 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6f1b912b0816,33993,1733456899429, sessionid=0x101aa0ac01a0000, setting cluster-up flag (Was=false) 2024-12-06T03:48:19,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,831 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T03:48:19,832 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,33993,1733456899429 2024-12-06T03:48:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:19,877 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T03:48:19,878 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,33993,1733456899429 2024-12-06T03:48:19,879 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T03:48:19,881 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T03:48:19,881 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T03:48:19,881 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T03:48:19,882 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6f1b912b0816,33993,1733456899429 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T03:48:19,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:48:19,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:48:19,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:48:19,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:48:19,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6f1b912b0816:0, corePoolSize=10, maxPoolSize=10 2024-12-06T03:48:19,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:48:19,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, 
maxPoolSize=1 2024-12-06T03:48:19,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733456929884 2024-12-06T03:48:19,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T03:48:19,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T03:48:19,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T03:48:19,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T03:48:19,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T03:48:19,884 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T03:48:19,885 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,885 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T03:48:19,885 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:48:19,885 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T03:48:19,885 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T03:48:19,885 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T03:48:19,885 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T03:48:19,885 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T03:48:19,885 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456899885,5,FailOnTimeoutGroup] 2024-12-06T03:48:19,886 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456899885,5,FailOnTimeoutGroup] 2024-12-06T03:48:19,886 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,886 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T03:48:19,886 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,886 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,886 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,886 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T03:48:19,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:48:19,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:48:19,894 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T03:48:19,895 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6 2024-12-06T03:48:19,895 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(746): ClusterId : acac490f-8e55-47e5-a17b-0d74a245b505 2024-12-06T03:48:19,895 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T03:48:19,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:48:19,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:48:19,901 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:48:19,902 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:48:19,904 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:48:19,904 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:19,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:48:19,905 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:48:19,905 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:19,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:48:19,907 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T03:48:19,907 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T03:48:19,907 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:48:19,907 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:19,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:48:19,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:48:19,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:19,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:19,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:48:19,909 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740 2024-12-06T03:48:19,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740 2024-12-06T03:48:19,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:48:19,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:48:19,911 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-06T03:48:19,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:48:19,915 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T03:48:19,915 DEBUG [RS:0;6f1b912b0816:39039 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@514eb9aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:48:19,919 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:48:19,920 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849119, jitterRate=0.07971188426017761}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:48:19,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733456899901Initializing all the Stores at 1733456899902 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456899902Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456899902Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456899902Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456899902Cleaning up temporary data from old regions at 1733456899911 (+9 ms)Region opened successfully at 1733456899920 (+9 ms) 2024-12-06T03:48:19,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:48:19,920 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:48:19,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:48:19,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
2024-12-06T03:48:19,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:48:19,921 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:48:19,921 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456899920Disabling compacts and flushes for region at 1733456899920Disabling writes for close at 1733456899920Writing region close event to WAL at 1733456899921 (+1 ms)Closed at 1733456899921 2024-12-06T03:48:19,922 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:48:19,922 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T03:48:19,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T03:48:19,923 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:48:19,924 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T03:48:19,929 DEBUG [RS:0;6f1b912b0816:39039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6f1b912b0816:39039 2024-12-06T03:48:19,929 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T03:48:19,929 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T03:48:19,929 DEBUG [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-06T03:48:19,929 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,33993,1733456899429 with port=39039, startcode=1733456899572 2024-12-06T03:48:19,930 DEBUG [RS:0;6f1b912b0816:39039 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T03:48:19,931 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41493, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T03:48:19,932 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33993 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6f1b912b0816,39039,1733456899572 2024-12-06T03:48:19,932 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33993 {}] master.ServerManager(517): Registering regionserver=6f1b912b0816,39039,1733456899572 2024-12-06T03:48:19,933 DEBUG [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6 2024-12-06T03:48:19,933 DEBUG [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42313 2024-12-06T03:48:19,934 DEBUG [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T03:48:19,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:48:19,944 DEBUG [RS:0;6f1b912b0816:39039 {}] zookeeper.ZKUtil(111): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6f1b912b0816,39039,1733456899572 2024-12-06T03:48:19,944 WARN [RS:0;6f1b912b0816:39039 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:48:19,944 INFO [RS:0;6f1b912b0816:39039 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:48:19,944 DEBUG [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572 2024-12-06T03:48:19,944 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6f1b912b0816,39039,1733456899572] 2024-12-06T03:48:19,947 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T03:48:19,948 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T03:48:19,948 INFO [RS:0;6f1b912b0816:39039 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:48:19,948 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T03:48:19,948 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T03:48:19,949 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T03:48:19,949 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,949 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,950 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,950 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:48:19,950 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:48:19,950 DEBUG [RS:0;6f1b912b0816:39039 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:48:19,950 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-06T03:48:19,950 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,950 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,950 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,950 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,950 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,39039,1733456899572-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:48:19,962 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T03:48:19,962 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,39039,1733456899572-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,962 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,962 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.Replication(171): 6f1b912b0816,39039,1733456899572 started 2024-12-06T03:48:19,974 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:19,974 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1482): Serving as 6f1b912b0816,39039,1733456899572, RpcServer on 6f1b912b0816/172.17.0.2:39039, sessionid=0x101aa0ac01a0001 2024-12-06T03:48:19,974 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T03:48:19,974 DEBUG [RS:0;6f1b912b0816:39039 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6f1b912b0816,39039,1733456899572 2024-12-06T03:48:19,974 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,39039,1733456899572' 2024-12-06T03:48:19,974 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T03:48:19,975 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T03:48:19,975 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T03:48:19,975 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T03:48:19,976 DEBUG [RS:0;6f1b912b0816:39039 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6f1b912b0816,39039,1733456899572 2024-12-06T03:48:19,976 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,39039,1733456899572' 2024-12-06T03:48:19,976 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T03:48:19,976 DEBUG 
[RS:0;6f1b912b0816:39039 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T03:48:19,976 DEBUG [RS:0;6f1b912b0816:39039 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T03:48:19,976 INFO [RS:0;6f1b912b0816:39039 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T03:48:19,976 INFO [RS:0;6f1b912b0816:39039 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T03:48:20,074 WARN [6f1b912b0816:33993 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-06T03:48:20,078 INFO [RS:0;6f1b912b0816:39039 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C39039%2C1733456899572, suffix=, logDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572, archiveDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/oldWALs, maxLogs=32 2024-12-06T03:48:20,079 INFO [RS:0;6f1b912b0816:39039 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C39039%2C1733456899572.1733456900079 2024-12-06T03:48:20,084 INFO [RS:0;6f1b912b0816:39039 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456900079 2024-12-06T03:48:20,086 DEBUG [RS:0;6f1b912b0816:39039 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41803:41803),(127.0.0.1/127.0.0.1:46359:46359)] 2024-12-06T03:48:20,324 DEBUG [6f1b912b0816:33993 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T03:48:20,325 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6f1b912b0816,39039,1733456899572 2024-12-06T03:48:20,326 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,39039,1733456899572, state=OPENING 2024-12-06T03:48:20,356 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T03:48:20,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:20,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:48:20,407 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:48:20,407 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:48:20,407 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:48:20,407 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,39039,1733456899572}] 2024-12-06T03:48:20,559 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T03:48:20,561 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53639, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T03:48:20,564 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T03:48:20,564 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:48:20,566 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C39039%2C1733456899572.meta, suffix=.meta, logDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572, archiveDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/oldWALs, maxLogs=32 2024-12-06T03:48:20,566 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C39039%2C1733456899572.meta.1733456900566.meta 2024-12-06T03:48:20,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:20,571 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.meta.1733456900566.meta 2024-12-06T03:48:20,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:20,575 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41803:41803),(127.0.0.1/127.0.0.1:46359:46359)] 2024-12-06T03:48:20,577 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:48:20,577 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T03:48:20,577 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T03:48:20,577 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T03:48:20,577 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T03:48:20,577 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:48:20,577 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T03:48:20,577 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T03:48:20,579 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:48:20,580 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:48:20,580 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:20,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:20,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:48:20,581 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:48:20,581 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:20,581 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:20,581 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:48:20,582 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:48:20,582 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:20,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:20,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 
1588230740 2024-12-06T03:48:20,583 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:48:20,583 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:20,583 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:48:20,583 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:48:20,584 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740 2024-12-06T03:48:20,585 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740 2024-12-06T03:48:20,586 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:48:20,586 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:48:20,587 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-06T03:48:20,588 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:48:20,589 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798968, jitterRate=0.015940994024276733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:48:20,589 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T03:48:20,589 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733456900577Writing region info on filesystem at 1733456900578 (+1 ms)Initializing all the Stores at 1733456900578Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456900579 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456900579Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456900579Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456900579Cleaning up temporary data from old regions at 1733456900586 (+7 ms)Running coprocessor post-open hooks at 1733456900589 (+3 ms)Region opened successfully at 1733456900589 2024-12-06T03:48:20,590 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733456900559 2024-12-06T03:48:20,592 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T03:48:20,592 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T03:48:20,593 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=6f1b912b0816,39039,1733456899572 2024-12-06T03:48:20,594 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,39039,1733456899572, state=OPEN 2024-12-06T03:48:20,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:48:20,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:48:20,639 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6f1b912b0816,39039,1733456899572 2024-12-06T03:48:20,639 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:48:20,639 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:48:20,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T03:48:20,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,39039,1733456899572 in 232 msec 2024-12-06T03:48:20,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T03:48:20,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 720 msec 2024-12-06T03:48:20,645 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:48:20,645 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T03:48:20,646 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:48:20,646 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,39039,1733456899572, seqNum=-1] 2024-12-06T03:48:20,647 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:48:20,648 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43545, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:48:20,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 772 msec 2024-12-06T03:48:20,654 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733456900654, completionTime=-1 2024-12-06T03:48:20,654 INFO 
[master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T03:48:20,654 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-06T03:48:20,656 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-06T03:48:20,656 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733456960656 2024-12-06T03:48:20,656 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733457020656 2024-12-06T03:48:20,656 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-06T03:48:20,657 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33993,1733456899429-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:20,657 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33993,1733456899429-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:20,657 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33993,1733456899429-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:20,657 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6f1b912b0816:33993, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:20,657 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:20,657 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:20,659 DEBUG [master/6f1b912b0816:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T03:48:20,661 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.034sec 2024-12-06T03:48:20,662 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T03:48:20,662 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T03:48:20,662 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T03:48:20,662 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-06T03:48:20,662 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T03:48:20,662 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33993,1733456899429-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:48:20,662 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33993,1733456899429-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T03:48:20,664 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T03:48:20,664 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T03:48:20,664 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33993,1733456899429-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:48:20,696 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39e530ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:48:20,696 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6f1b912b0816,33993,-1 for getting cluster id 2024-12-06T03:48:20,696 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T03:48:20,698 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'acac490f-8e55-47e5-a17b-0d74a245b505' 2024-12-06T03:48:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T03:48:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "acac490f-8e55-47e5-a17b-0d74a245b505" 2024-12-06T03:48:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50b21158, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:48:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6f1b912b0816,33993,-1] 2024-12-06T03:48:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T03:48:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:48:20,700 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44592, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-06T03:48:20,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9e7414, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:48:20,702 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:48:20,703 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,39039,1733456899572, seqNum=-1] 2024-12-06T03:48:20,703 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:48:20,704 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47536, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:48:20,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6f1b912b0816,33993,1733456899429 2024-12-06T03:48:20,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:48:20,708 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-06T03:48:20,708 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-06T03:48:20,709 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 6f1b912b0816,33993,1733456899429 2024-12-06T03:48:20,709 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@36cee0be 2024-12-06T03:48:20,709 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T03:48:20,711 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T03:48:20,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T03:48:20,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-06T03:48:20,712 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:48:20,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:20,716 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T03:48:20,717 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:20,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-06T03:48:20,718 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T03:48:20,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T03:48:20,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741835_1011 (size=405) 2024-12-06T03:48:20,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741835_1011 (size=405) 2024-12-06T03:48:20,733 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 31712bc8a4f16f676b40a5ad703890cb, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6 2024-12-06T03:48:20,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741836_1012 (size=88) 2024-12-06T03:48:20,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42909 is added to blk_1073741836_1012 (size=88) 2024-12-06T03:48:20,745 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:48:20,745 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 31712bc8a4f16f676b40a5ad703890cb, disabling compactions & flushes 2024-12-06T03:48:20,745 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:48:20,745 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:48:20,745 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. after waiting 0 ms 2024-12-06T03:48:20,745 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:48:20,745 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:48:20,745 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 31712bc8a4f16f676b40a5ad703890cb: Waiting for close lock at 1733456900745Disabling compacts and flushes for region at 1733456900745Disabling writes for close at 1733456900745Writing region close event to WAL at 1733456900745Closed at 1733456900745 2024-12-06T03:48:20,747 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T03:48:20,747 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733456900747"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733456900747"}]},"ts":"1733456900747"} 2024-12-06T03:48:20,750 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-06T03:48:20,751 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T03:48:20,751 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456900751"}]},"ts":"1733456900751"} 2024-12-06T03:48:20,753 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-06T03:48:20,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=31712bc8a4f16f676b40a5ad703890cb, ASSIGN}] 2024-12-06T03:48:20,755 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=31712bc8a4f16f676b40a5ad703890cb, ASSIGN 2024-12-06T03:48:20,757 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=31712bc8a4f16f676b40a5ad703890cb, ASSIGN; state=OFFLINE, location=6f1b912b0816,39039,1733456899572; forceNewPlan=false, retain=false 2024-12-06T03:48:20,907 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=31712bc8a4f16f676b40a5ad703890cb, regionState=OPENING, regionLocation=6f1b912b0816,39039,1733456899572 2024-12-06T03:48:20,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=31712bc8a4f16f676b40a5ad703890cb, ASSIGN because future has completed 2024-12-06T03:48:20,910 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31712bc8a4f16f676b40a5ad703890cb, server=6f1b912b0816,39039,1733456899572}] 2024-12-06T03:48:21,066 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
2024-12-06T03:48:21,066 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 31712bc8a4f16f676b40a5ad703890cb, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:48:21,066 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,066 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:48:21,066 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,066 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,067 INFO [StoreOpener-31712bc8a4f16f676b40a5ad703890cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,069 INFO [StoreOpener-31712bc8a4f16f676b40a5ad703890cb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31712bc8a4f16f676b40a5ad703890cb columnFamilyName info 2024-12-06T03:48:21,069 DEBUG [StoreOpener-31712bc8a4f16f676b40a5ad703890cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:48:21,069 INFO [StoreOpener-31712bc8a4f16f676b40a5ad703890cb-1 {}] regionserver.HStore(327): Store=31712bc8a4f16f676b40a5ad703890cb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:48:21,069 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,070 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,070 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,071 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,071 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,072 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,074 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:48:21,074 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 31712bc8a4f16f676b40a5ad703890cb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809723, jitterRate=0.029616758227348328}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T03:48:21,074 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:48:21,075 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 31712bc8a4f16f676b40a5ad703890cb: Running coprocessor pre-open hook at 1733456901066Writing region info on filesystem at 1733456901066Initializing all the Stores at 1733456901067 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456901067Cleaning up temporary data from old regions at 1733456901071 (+4 ms)Running coprocessor post-open hooks at 1733456901074 (+3 ms)Region opened successfully at 1733456901075 (+1 ms) 2024-12-06T03:48:21,076 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb., pid=6, masterSystemTime=1733456901062 2024-12-06T03:48:21,078 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:48:21,078 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:48:21,079 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=31712bc8a4f16f676b40a5ad703890cb, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,39039,1733456899572 2024-12-06T03:48:21,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31712bc8a4f16f676b40a5ad703890cb, server=6f1b912b0816,39039,1733456899572 because future has completed 2024-12-06T03:48:21,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T03:48:21,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 31712bc8a4f16f676b40a5ad703890cb, server=6f1b912b0816,39039,1733456899572 in 172 msec 2024-12-06T03:48:21,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T03:48:21,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=31712bc8a4f16f676b40a5ad703890cb, ASSIGN in 331 msec 2024-12-06T03:48:21,089 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T03:48:21,089 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456901089"}]},"ts":"1733456901089"} 2024-12-06T03:48:21,091 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-06T03:48:21,093 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T03:48:21,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 381 msec 2024-12-06T03:48:21,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:21,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:22,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:22,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:22,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:48:22,975 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-06T03:48:22,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:48:22,975 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-06T03:48:22,976 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:22,976 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-06T03:48:23,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:23,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:24,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:24,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:25,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:25,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:25,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:25,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,107 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T03:48:26,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:48:26,139 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T03:48:26,139 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-06T03:48:26,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:26,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:27,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:27,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:28,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:28,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:29,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:29,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:30,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:30,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:30,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T03:48:30,794 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-06T03:48:30,794 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-06T03:48:30,802 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:30,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
2024-12-06T03:48:30,805 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb., hostname=6f1b912b0816,39039,1733456899572, seqNum=2] 2024-12-06T03:48:30,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:30,816 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T03:48:30,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T03:48:30,817 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T03:48:30,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T03:48:30,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39039 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-06T03:48:30,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
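[Editor's note] The locator record above shows the client resolving the region for row 'row0001' just before writing it; the cell that later appears in the flush ("row0001/info:/.../Put", empty qualifier) would have been produced by an ordinary Put. A minimal sketch of such a write, assuming the standard HBase client API; the value payload is illustrative (the log only shows the resulting cell is about 1 KB).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))) {
      // One cell in family 'info' with an empty qualifier, matching the key seen in the flush.
      Put put = new Put(Bytes.toBytes("row0001"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("illustrative-value"));
      table.put(put);
    }
  }
}
```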
2024-12-06T03:48:30,988 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 31712bc8a4f16f676b40a5ad703890cb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T03:48:31,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/c4b6eba41cba43e9a45eb74f5b184512 is 1080, key is row0001/info:/1733456910806/Put/seqid=0 2024-12-06T03:48:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741837_1013 (size=6033) 2024-12-06T03:48:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741837_1013 (size=6033) 2024-12-06T03:48:31,007 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/c4b6eba41cba43e9a45eb74f5b184512 2024-12-06T03:48:31,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/c4b6eba41cba43e9a45eb74f5b184512 as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/c4b6eba41cba43e9a45eb74f5b184512 2024-12-06T03:48:31,018 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/c4b6eba41cba43e9a45eb74f5b184512, entries=1, sequenceid=5, filesize=5.9 K 2024-12-06T03:48:31,019 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 31712bc8a4f16f676b40a5ad703890cb in 31ms, sequenceid=5, compaction requested=false 2024-12-06T03:48:31,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 31712bc8a4f16f676b40a5ad703890cb: 2024-12-06T03:48:31,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
2024-12-06T03:48:31,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-06T03:48:31,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-06T03:48:31,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-06T03:48:31,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 205 msec 2024-12-06T03:48:31,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 216 msec 2024-12-06T03:48:31,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:31,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:32,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:32,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:33,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:33,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:34,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:34,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:35,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:35,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:36,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:36,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:37,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:37,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:38,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:38,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:39,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:39,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:40,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:40,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:40,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-06T03:48:40,901 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-06T03:48:40,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:40,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:40,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-06T03:48:40,906 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T03:48:40,907 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T03:48:40,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T03:48:41,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39039 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-06T03:48:41,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
2024-12-06T03:48:41,061 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 31712bc8a4f16f676b40a5ad703890cb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T03:48:41,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/2b5b0633fe224544b69b6857d3749f7f is 1080, key is row0002/info:/1733456920902/Put/seqid=0 2024-12-06T03:48:41,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741838_1014 (size=6033) 2024-12-06T03:48:41,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741838_1014 (size=6033) 2024-12-06T03:48:41,074 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/2b5b0633fe224544b69b6857d3749f7f 2024-12-06T03:48:41,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/2b5b0633fe224544b69b6857d3749f7f as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/2b5b0633fe224544b69b6857d3749f7f 2024-12-06T03:48:41,085 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/2b5b0633fe224544b69b6857d3749f7f, entries=1, sequenceid=9, filesize=5.9 K 2024-12-06T03:48:41,086 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 31712bc8a4f16f676b40a5ad703890cb in 25ms, sequenceid=9, compaction requested=false 2024-12-06T03:48:41,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 31712bc8a4f16f676b40a5ad703890cb: 2024-12-06T03:48:41,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
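[Editor's note] Both flushes in this excerpt (pid=7/8 and pid=9/10) follow the same path: the client asks the master to flush the table, FlushTableProcedure fans out a FlushRegionProcedure, and the region server writes the memstore to a .tmp HFile before committing it into the info store. A minimal client-side sketch of the triggering call, assuming the standard async admin API (the RawAsyncHBaseAdmin seen in the log); connection configuration is illustrative, not taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // getAdmin() returns the async admin; flush() completes once the master
      // reports the flush procedure as done (the "Operation: FLUSH ... completed" line).
      conn.getAdmin().flush(table).get();
    }
  }
}
```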
2024-12-06T03:48:41,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-06T03:48:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-06T03:48:41,090 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T03:48:41,090 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-12-06T03:48:41,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-12-06T03:48:41,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:41,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:42,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:42,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:43,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:43,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 after 68056ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:48:43,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:43,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T03:48:44,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:44,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:45,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:45,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:46,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:46,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:47,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:47,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:48:48,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:48,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:49,411 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T03:48:49,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:49,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:50,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:50,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:50,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-06T03:48:50,922 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-06T03:48:50,925 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C39039%2C1733456899572.1733456930925 2024-12-06T03:48:50,935 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:50,935 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:50,935 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:50,935 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:50,935 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:48:50,935 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456900079 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456930925 2024-12-06T03:48:50,939 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41803:41803),(127.0.0.1/127.0.0.1:46359:46359)] 2024-12-06T03:48:50,939 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456900079 is not closed yet, will try archiving it next time 2024-12-06T03:48:50,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:50,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741833_1009 (size=5546) 2024-12-06T03:48:50,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741833_1009 (size=5546) 2024-12-06T03:48:50,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:48:50,942 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute 
state=FLUSH_TABLE_PREPARE 2024-12-06T03:48:50,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-06T03:48:50,943 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T03:48:50,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T03:48:51,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39039 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-12-06T03:48:51,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:48:51,098 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 31712bc8a4f16f676b40a5ad703890cb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T03:48:51,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/5feef709f7cb4e0da0f2065ab52e7ab4 is 1080, key is row0003/info:/1733456930923/Put/seqid=0 2024-12-06T03:48:51,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741840_1016 (size=6033) 2024-12-06T03:48:51,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741840_1016 (size=6033) 2024-12-06T03:48:51,114 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/5feef709f7cb4e0da0f2065ab52e7ab4 2024-12-06T03:48:51,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/5feef709f7cb4e0da0f2065ab52e7ab4 as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/5feef709f7cb4e0da0f2065ab52e7ab4 2024-12-06T03:48:51,125 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/5feef709f7cb4e0da0f2065ab52e7ab4, entries=1, sequenceid=13, filesize=5.9 K 2024-12-06T03:48:51,126 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 31712bc8a4f16f676b40a5ad703890cb in 28ms, sequenceid=13, compaction requested=true 2024-12-06T03:48:51,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 31712bc8a4f16f676b40a5ad703890cb: 2024-12-06T03:48:51,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:48:51,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-06T03:48:51,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-06T03:48:51,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-06T03:48:51,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-12-06T03:48:51,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-12-06T03:48:51,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:51,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:52,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:52,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:53,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:53,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:54,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:54,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:55,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:55,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:56,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:56,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:57,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:57,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:58,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:58,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:59,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:48:59,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:00,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:00,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:00,665 INFO [master/6f1b912b0816:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T03:49:00,665 INFO [master/6f1b912b0816:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-06T03:49:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-06T03:49:01,032 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-06T03:49:01,032 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:49:01,033 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:49:01,033 DEBUG [Time-limited test {}] regionserver.HStore(1541): 31712bc8a4f16f676b40a5ad703890cb/info is initiating minor compaction (all files) 2024-12-06T03:49:01,033 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:49:01,033 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:01,033 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 31712bc8a4f16f676b40a5ad703890cb/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:49:01,034 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/c4b6eba41cba43e9a45eb74f5b184512, hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/2b5b0633fe224544b69b6857d3749f7f, hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/5feef709f7cb4e0da0f2065ab52e7ab4] into tmpdir=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp, totalSize=17.7 K 2024-12-06T03:49:01,034 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c4b6eba41cba43e9a45eb74f5b184512, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733456910806 2024-12-06T03:49:01,034 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2b5b0633fe224544b69b6857d3749f7f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733456920902 2024-12-06T03:49:01,035 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5feef709f7cb4e0da0f2065ab52e7ab4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733456930923 2024-12-06T03:49:01,045 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 31712bc8a4f16f676b40a5ad703890cb#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:01,046 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/26fd8419c04a4532a169cf577d26bcb4 is 1080, key is row0001/info:/1733456910806/Put/seqid=0 2024-12-06T03:49:01,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741841_1017 (size=8296) 2024-12-06T03:49:01,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741841_1017 (size=8296) 2024-12-06T03:49:01,057 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/26fd8419c04a4532a169cf577d26bcb4 as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/26fd8419c04a4532a169cf577d26bcb4 2024-12-06T03:49:01,065 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31712bc8a4f16f676b40a5ad703890cb/info of 31712bc8a4f16f676b40a5ad703890cb into 26fd8419c04a4532a169cf577d26bcb4(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T03:49:01,065 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 31712bc8a4f16f676b40a5ad703890cb: 2024-12-06T03:49:01,068 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C39039%2C1733456899572.1733456941068 2024-12-06T03:49:01,074 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:01,074 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:01,074 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:01,074 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:01,074 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:01,074 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456930925 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456941068 2024-12-06T03:49:01,075 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41803:41803),(127.0.0.1/127.0.0.1:46359:46359)] 2024-12-06T03:49:01,075 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456930925 is not closed yet, will try archiving it next time 2024-12-06T03:49:01,076 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456900079 to hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/oldWALs/6f1b912b0816%2C39039%2C1733456899572.1733456900079 2024-12-06T03:49:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741839_1015 (size=2520) 2024-12-06T03:49:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741839_1015 (size=2520) 2024-12-06T03:49:01,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:49:01,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:49:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-06T03:49:01,079 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T03:49:01,080 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T03:49:01,080 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T03:49:01,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39039 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-06T03:49:01,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
2024-12-06T03:49:01,233 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 31712bc8a4f16f676b40a5ad703890cb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T03:49:01,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/4365910ba43a499ab8a9268b1f12a417 is 1080, key is row0000/info:/1733456941066/Put/seqid=0 2024-12-06T03:49:01,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741843_1019 (size=6033) 2024-12-06T03:49:01,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741843_1019 (size=6033) 2024-12-06T03:49:01,244 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/4365910ba43a499ab8a9268b1f12a417 2024-12-06T03:49:01,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/4365910ba43a499ab8a9268b1f12a417 as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/4365910ba43a499ab8a9268b1f12a417 2024-12-06T03:49:01,255 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/4365910ba43a499ab8a9268b1f12a417, entries=1, sequenceid=18, filesize=5.9 K 2024-12-06T03:49:01,257 INFO [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 31712bc8a4f16f676b40a5ad703890cb in 23ms, sequenceid=18, compaction requested=false 2024-12-06T03:49:01,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 31712bc8a4f16f676b40a5ad703890cb: 2024-12-06T03:49:01,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
2024-12-06T03:49:01,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-06T03:49:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-06T03:49:01,262 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-06T03:49:01,262 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-12-06T03:49:01,265 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-12-06T03:49:01,477 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456930925 to hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/oldWALs/6f1b912b0816%2C39039%2C1733456899572.1733456930925 2024-12-06T03:49:01,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:01,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:02,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:02,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:03,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:03,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:04,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:04,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:05,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:05,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:06,066 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 31712bc8a4f16f676b40a5ad703890cb, had cached 0 bytes from a total of 14329 2024-12-06T03:49:06,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:06,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:07,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:07,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:08,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:08,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:09,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:09,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:10,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:10,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:49:11,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-06T03:49:11,092 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-06T03:49:11,094 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C39039%2C1733456899572.1733456951094 2024-12-06T03:49:11,099 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,099 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,099 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,099 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,099 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,099 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456941068 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456951094 2024-12-06T03:49:11,100 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46359:46359),(127.0.0.1/127.0.0.1:41803:41803)] 2024-12-06T03:49:11,100 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/WALs/6f1b912b0816,39039,1733456899572/6f1b912b0816%2C39039%2C1733456899572.1733456941068 is not closed yet, will try archiving it next time 2024-12-06T03:49:11,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T03:49:11,100 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-06T03:49:11,100 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:49:11,100 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:49:11,101 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:49:11,101 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-06T03:49:11,101 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T03:49:11,101 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1066708981, stopped=false 2024-12-06T03:49:11,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741842_1018 (size=2026) 2024-12-06T03:49:11,101 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6f1b912b0816,33993,1733456899429 2024-12-06T03:49:11,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741842_1018 (size=2026) 2024-12-06T03:49:11,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:49:11,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:49:11,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:11,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:11,158 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:49:11,158 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-06T03:49:11,158 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:49:11,158 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:49:11,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:49:11,158 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6f1b912b0816,39039,1733456899572' ***** 2024-12-06T03:49:11,158 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T03:49:11,159 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T03:49:11,159 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(3091): Received CLOSE for 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(959): stopping server 6f1b912b0816,39039,1733456899572 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6f1b912b0816:39039. 
2024-12-06T03:49:11,159 DEBUG [RS:0;6f1b912b0816:39039 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:49:11,159 DEBUG [RS:0;6f1b912b0816:39039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:49:11,159 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 31712bc8a4f16f676b40a5ad703890cb, disabling compactions & flushes 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T03:49:11,159 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:49:11,159 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:49:11,159 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T03:49:11,160 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. after waiting 0 ms 2024-12-06T03:49:11,160 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
2024-12-06T03:49:11,160 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-06T03:49:11,160 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 31712bc8a4f16f676b40a5ad703890cb 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T03:49:11,160 DEBUG [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 31712bc8a4f16f676b40a5ad703890cb=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.} 2024-12-06T03:49:11,160 DEBUG [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 31712bc8a4f16f676b40a5ad703890cb 2024-12-06T03:49:11,160 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:49:11,160 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:49:11,160 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:49:11,160 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:49:11,160 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:49:11,160 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-06T03:49:11,164 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/031efe8ab7c64ce2826a33dc1bf4806e is 1080, key is row0001/info:/1733456951093/Put/seqid=0 2024-12-06T03:49:11,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741845_1021 (size=6033) 2024-12-06T03:49:11,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741845_1021 (size=6033) 2024-12-06T03:49:11,169 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/031efe8ab7c64ce2826a33dc1bf4806e 2024-12-06T03:49:11,176 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/.tmp/info/031efe8ab7c64ce2826a33dc1bf4806e as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/031efe8ab7c64ce2826a33dc1bf4806e 2024-12-06T03:49:11,177 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/info/19b6f817cb884c0e8e42e4e4212bff43 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb./info:regioninfo/1733456901079/Put/seqid=0 2024-12-06T03:49:11,182 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/031efe8ab7c64ce2826a33dc1bf4806e, entries=1, sequenceid=22, filesize=5.9 K 2024-12-06T03:49:11,183 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 31712bc8a4f16f676b40a5ad703890cb in 22ms, sequenceid=22, compaction requested=true 2024-12-06T03:49:11,187 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/c4b6eba41cba43e9a45eb74f5b184512, hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/2b5b0633fe224544b69b6857d3749f7f, hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/5feef709f7cb4e0da0f2065ab52e7ab4] to archive 2024-12-06T03:49:11,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741846_1022 (size=7308) 2024-12-06T03:49:11,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741846_1022 (size=7308) 2024-12-06T03:49:11,188 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T03:49:11,189 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/info/19b6f817cb884c0e8e42e4e4212bff43 2024-12-06T03:49:11,190 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/c4b6eba41cba43e9a45eb74f5b184512 to hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/c4b6eba41cba43e9a45eb74f5b184512 2024-12-06T03:49:11,191 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/2b5b0633fe224544b69b6857d3749f7f to hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/2b5b0633fe224544b69b6857d3749f7f 2024-12-06T03:49:11,192 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/5feef709f7cb4e0da0f2065ab52e7ab4 to hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/info/5feef709f7cb4e0da0f2065ab52e7ab4 2024-12-06T03:49:11,193 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6f1b912b0816:33993 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-06T03:49:11,193 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c4b6eba41cba43e9a45eb74f5b184512=6033, 2b5b0633fe224544b69b6857d3749f7f=6033, 5feef709f7cb4e0da0f2065ab52e7ab4=6033] 2024-12-06T03:49:11,196 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/31712bc8a4f16f676b40a5ad703890cb/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-06T03:49:11,197 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 2024-12-06T03:49:11,197 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 31712bc8a4f16f676b40a5ad703890cb: Waiting for close lock at 1733456951159Running coprocessor pre-close hooks at 1733456951159Disabling compacts and flushes for region at 1733456951159Disabling writes for close at 1733456951160 (+1 ms)Obtaining lock to block concurrent updates at 1733456951160Preparing flush snapshotting stores in 31712bc8a4f16f676b40a5ad703890cb at 1733456951160Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733456951160Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. at 1733456951161 (+1 ms)Flushing 31712bc8a4f16f676b40a5ad703890cb/info: creating writer at 1733456951161Flushing 31712bc8a4f16f676b40a5ad703890cb/info: appending metadata at 1733456951164 (+3 ms)Flushing 31712bc8a4f16f676b40a5ad703890cb/info: closing flushed file at 1733456951164Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77b23c97: reopening flushed file at 1733456951175 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 31712bc8a4f16f676b40a5ad703890cb in 22ms, sequenceid=22, compaction requested=true at 1733456951183 (+8 ms)Writing region close event to WAL at 1733456951193 (+10 ms)Running coprocessor post-close hooks at 1733456951197 (+4 ms)Closed at 1733456951197 2024-12-06T03:49:11,197 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733456900711.31712bc8a4f16f676b40a5ad703890cb. 
2024-12-06T03:49:11,207 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/ns/461de757f3904caa975c04d479593e1d is 43, key is default/ns:d/1733456900648/Put/seqid=0 2024-12-06T03:49:11,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741847_1023 (size=5153) 2024-12-06T03:49:11,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741847_1023 (size=5153) 2024-12-06T03:49:11,212 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/ns/461de757f3904caa975c04d479593e1d 2024-12-06T03:49:11,233 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/table/366caa0a54794ffcaad7a83fd17064cb is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733456901089/Put/seqid=0 2024-12-06T03:49:11,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741848_1024 (size=5508) 2024-12-06T03:49:11,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741848_1024 (size=5508) 2024-12-06T03:49:11,238 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/table/366caa0a54794ffcaad7a83fd17064cb 2024-12-06T03:49:11,242 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/info/19b6f817cb884c0e8e42e4e4212bff43 as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/info/19b6f817cb884c0e8e42e4e4212bff43 2024-12-06T03:49:11,247 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/info/19b6f817cb884c0e8e42e4e4212bff43, entries=10, sequenceid=11, filesize=7.1 K 2024-12-06T03:49:11,248 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/ns/461de757f3904caa975c04d479593e1d as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/ns/461de757f3904caa975c04d479593e1d 2024-12-06T03:49:11,252 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/ns/461de757f3904caa975c04d479593e1d, entries=2, sequenceid=11, filesize=5.0 K 2024-12-06T03:49:11,253 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/.tmp/table/366caa0a54794ffcaad7a83fd17064cb as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/table/366caa0a54794ffcaad7a83fd17064cb 2024-12-06T03:49:11,257 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/table/366caa0a54794ffcaad7a83fd17064cb, entries=2, sequenceid=11, filesize=5.4 K 2024-12-06T03:49:11,258 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-12-06T03:49:11,262 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-06T03:49:11,263 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:49:11,263 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:49:11,263 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456951160Running coprocessor pre-close hooks at 1733456951160Disabling compacts and flushes for region at 1733456951160Disabling writes for close at 1733456951160Obtaining lock to block concurrent updates at 1733456951160Preparing flush snapshotting stores in 1588230740 at 1733456951160Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733456951160Flushing stores of hbase:meta,,1.1588230740 at 1733456951161 (+1 ms)Flushing 1588230740/info: creating writer at 1733456951161Flushing 1588230740/info: appending metadata at 1733456951176 (+15 ms)Flushing 1588230740/info: closing flushed file at 1733456951176Flushing 1588230740/ns: creating writer at 1733456951194 (+18 ms)Flushing 1588230740/ns: appending metadata at 1733456951206 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733456951206Flushing 1588230740/table: creating writer at 1733456951216 (+10 ms)Flushing 1588230740/table: appending metadata at 1733456951232 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733456951232Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d4e57b0: reopening flushed file at 1733456951242 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5252b85: reopening flushed file at 1733456951247 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78df2951: reopening flushed file at 1733456951252 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false at 1733456951258 (+6 ms)Writing region close event to WAL at 1733456951259 (+1 ms)Running coprocessor post-close hooks at 1733456951263 (+4 ms)Closed at 1733456951263 2024-12-06T03:49:11,263 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T03:49:11,360 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(976): stopping server 6f1b912b0816,39039,1733456899572; all regions closed. 2024-12-06T03:49:11,360 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,360 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,360 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,361 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,361 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741834_1010 (size=3306) 2024-12-06T03:49:11,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741834_1010 (size=3306) 2024-12-06T03:49:11,364 DEBUG [RS:0;6f1b912b0816:39039 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/oldWALs 2024-12-06T03:49:11,364 INFO [RS:0;6f1b912b0816:39039 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C39039%2C1733456899572.meta:.meta(num 1733456900566) 2024-12-06T03:49:11,364 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,364 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,364 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,364 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,364 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741844_1020 (size=1252) 2024-12-06T03:49:11,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741844_1020 (size=1252) 2024-12-06T03:49:11,368 DEBUG [RS:0;6f1b912b0816:39039 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/oldWALs 2024-12-06T03:49:11,368 INFO [RS:0;6f1b912b0816:39039 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C39039%2C1733456899572:(num 1733456951094) 2024-12-06T03:49:11,368 DEBUG [RS:0;6f1b912b0816:39039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:49:11,368 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:49:11,368 INFO [RS:0;6f1b912b0816:39039 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:49:11,369 INFO [RS:0;6f1b912b0816:39039 {}] hbase.ChoreService(370): Chore service for: regionserver/6f1b912b0816:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T03:49:11,369 INFO [RS:0;6f1b912b0816:39039 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:49:11,369 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:49:11,369 INFO [RS:0;6f1b912b0816:39039 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39039 2024-12-06T03:49:11,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6f1b912b0816,39039,1733456899572 2024-12-06T03:49:11,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:49:11,391 INFO [RS:0;6f1b912b0816:39039 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:49:11,399 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6f1b912b0816,39039,1733456899572] 2024-12-06T03:49:11,407 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6f1b912b0816,39039,1733456899572 already deleted, retry=false 2024-12-06T03:49:11,408 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6f1b912b0816,39039,1733456899572 expired; onlineServers=0 2024-12-06T03:49:11,408 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6f1b912b0816,33993,1733456899429' ***** 2024-12-06T03:49:11,408 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T03:49:11,408 INFO [M:0;6f1b912b0816:33993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:49:11,408 INFO [M:0;6f1b912b0816:33993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:49:11,408 DEBUG [M:0;6f1b912b0816:33993 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T03:49:11,408 DEBUG [M:0;6f1b912b0816:33993 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T03:49:11,408 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T03:49:11,408 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456899885 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456899885,5,FailOnTimeoutGroup] 2024-12-06T03:49:11,408 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456899885 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456899885,5,FailOnTimeoutGroup] 2024-12-06T03:49:11,408 INFO [M:0;6f1b912b0816:33993 {}] hbase.ChoreService(370): Chore service for: master/6f1b912b0816:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T03:49:11,408 INFO [M:0;6f1b912b0816:33993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:49:11,408 DEBUG [M:0;6f1b912b0816:33993 {}] master.HMaster(1795): Stopping service threads 2024-12-06T03:49:11,408 INFO [M:0;6f1b912b0816:33993 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T03:49:11,408 INFO [M:0;6f1b912b0816:33993 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:49:11,409 INFO [M:0;6f1b912b0816:33993 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T03:49:11,409 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T03:49:11,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T03:49:11,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:11,416 DEBUG [M:0;6f1b912b0816:33993 {}] zookeeper.ZKUtil(347): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T03:49:11,416 WARN [M:0;6f1b912b0816:33993 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T03:49:11,416 INFO [M:0;6f1b912b0816:33993 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/.lastflushedseqids 2024-12-06T03:49:11,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741849_1025 (size=130) 2024-12-06T03:49:11,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741849_1025 (size=130) 2024-12-06T03:49:11,421 INFO [M:0;6f1b912b0816:33993 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T03:49:11,421 INFO [M:0;6f1b912b0816:33993 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T03:49:11,422 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:49:11,422 INFO [M:0;6f1b912b0816:33993 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:49:11,422 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:49:11,422 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:49:11,422 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:49:11,422 INFO [M:0;6f1b912b0816:33993 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.89 KB 2024-12-06T03:49:11,436 DEBUG [M:0;6f1b912b0816:33993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f08f5f8b934d414f821d31619af6dc66 is 82, key is hbase:meta,,1/info:regioninfo/1733456900593/Put/seqid=0 2024-12-06T03:49:11,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741850_1026 (size=5672) 2024-12-06T03:49:11,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741850_1026 (size=5672) 2024-12-06T03:49:11,440 INFO [M:0;6f1b912b0816:33993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f08f5f8b934d414f821d31619af6dc66 2024-12-06T03:49:11,457 DEBUG [M:0;6f1b912b0816:33993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b0f0e5c7f5945bbb0a4571e33d7a63f is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733456901094/Put/seqid=0 2024-12-06T03:49:11,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741851_1027 (size=7817) 2024-12-06T03:49:11,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741851_1027 (size=7817) 2024-12-06T03:49:11,464 INFO [M:0;6f1b912b0816:33993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.93 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b0f0e5c7f5945bbb0a4571e33d7a63f 2024-12-06T03:49:11,469 INFO [M:0;6f1b912b0816:33993 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b0f0e5c7f5945bbb0a4571e33d7a63f 2024-12-06T03:49:11,487 DEBUG [M:0;6f1b912b0816:33993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92ed98640791468ea4f89163214b3e6b is 69, key is 6f1b912b0816,39039,1733456899572/rs:state/1733456899932/Put/seqid=0 
2024-12-06T03:49:11,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741852_1028 (size=5156) 2024-12-06T03:49:11,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741852_1028 (size=5156) 2024-12-06T03:49:11,491 INFO [M:0;6f1b912b0816:33993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92ed98640791468ea4f89163214b3e6b 2024-12-06T03:49:11,499 INFO [RS:0;6f1b912b0816:39039 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:49:11,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:49:11,499 INFO [RS:0;6f1b912b0816:39039 {}] regionserver.HRegionServer(1031): Exiting; stopping=6f1b912b0816,39039,1733456899572; zookeeper connection closed. 2024-12-06T03:49:11,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39039-0x101aa0ac01a0001, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:49:11,500 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3265da1b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3265da1b 2024-12-06T03:49:11,500 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T03:49:11,515 DEBUG [M:0;6f1b912b0816:33993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/71409759e4e340fa81cb2614b46144a1 is 52, key is load_balancer_on/state:d/1733456900707/Put/seqid=0 2024-12-06T03:49:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741853_1029 (size=5056) 2024-12-06T03:49:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741853_1029 (size=5056) 2024-12-06T03:49:11,520 INFO [M:0;6f1b912b0816:33993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/71409759e4e340fa81cb2614b46144a1 2024-12-06T03:49:11,527 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f08f5f8b934d414f821d31619af6dc66 as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f08f5f8b934d414f821d31619af6dc66 2024-12-06T03:49:11,532 INFO [M:0;6f1b912b0816:33993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f08f5f8b934d414f821d31619af6dc66, entries=8, sequenceid=121, filesize=5.5 K 2024-12-06T03:49:11,533 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b0f0e5c7f5945bbb0a4571e33d7a63f as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b0f0e5c7f5945bbb0a4571e33d7a63f 2024-12-06T03:49:11,537 INFO [M:0;6f1b912b0816:33993 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b0f0e5c7f5945bbb0a4571e33d7a63f 2024-12-06T03:49:11,537 INFO [M:0;6f1b912b0816:33993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b0f0e5c7f5945bbb0a4571e33d7a63f, entries=14, sequenceid=121, filesize=7.6 K 2024-12-06T03:49:11,538 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92ed98640791468ea4f89163214b3e6b as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/92ed98640791468ea4f89163214b3e6b 2024-12-06T03:49:11,542 INFO [M:0;6f1b912b0816:33993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/92ed98640791468ea4f89163214b3e6b, entries=1, sequenceid=121, filesize=5.0 K 2024-12-06T03:49:11,543 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/71409759e4e340fa81cb2614b46144a1 as hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/71409759e4e340fa81cb2614b46144a1 2024-12-06T03:49:11,548 INFO [M:0;6f1b912b0816:33993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42313/user/jenkins/test-data/1d2e36ef-c882-576f-870b-66ddec52cdd6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/71409759e4e340fa81cb2614b46144a1, entries=1, sequenceid=121, filesize=4.9 K 2024-12-06T03:49:11,549 INFO [M:0;6f1b912b0816:33993 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44581, heapSize ~54.83 KB/56144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=121, compaction requested=false 2024-12-06T03:49:11,550 INFO [M:0;6f1b912b0816:33993 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T03:49:11,550 DEBUG [M:0;6f1b912b0816:33993 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456951422Disabling compacts and flushes for region at 1733456951422Disabling writes for close at 1733456951422Obtaining lock to block concurrent updates at 1733456951422Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733456951422Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44581, getHeapSize=56144, getOffHeapSize=0, getCellsCount=140 at 1733456951422Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733456951423 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733456951423Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733456951435 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733456951435Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733456951444 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733456951457 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733456951457Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733456951469 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733456951486 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733456951486Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733456951496 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733456951515 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733456951515Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e245191: reopening flushed file at 1733456951526 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@169a9098: reopening flushed file at 1733456951532 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@498b5cf4: reopening flushed file at 1733456951537 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d69f7ed: reopening flushed file at 1733456951542 (+5 ms)Finished flush of dataSize ~43.54 KB/44581, heapSize ~54.83 KB/56144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=121, compaction requested=false at 1733456951549 (+7 ms)Writing region close event to WAL at 1733456951550 (+1 ms)Closed at 1733456951550 2024-12-06T03:49:11,550 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,550 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,550 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,550 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,550 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:49:11,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45711 is added to blk_1073741830_1006 (size=52978) 2024-12-06T03:49:11,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741830_1006 (size=52978) 2024-12-06T03:49:11,552 INFO [M:0;6f1b912b0816:33993 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-06T03:49:11,552 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:49:11,552 INFO [M:0;6f1b912b0816:33993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33993 2024-12-06T03:49:11,553 INFO [M:0;6f1b912b0816:33993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:49:11,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:49:11,674 INFO [M:0;6f1b912b0816:33993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:49:11,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:49:11,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33993-0x101aa0ac01a0000, quorum=127.0.0.1:51923, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:49:11,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f798335{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:49:11,677 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@183e22a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:49:11,677 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:49:11,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@713ba638{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:49:11,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6aa66066{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.log.dir/,STOPPED} 2024-12-06T03:49:11,679 WARN [BP-77245198-172.17.0.2-1733456897772 heartbeating to localhost/127.0.0.1:42313 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:49:11,679 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:49:11,679 WARN [BP-77245198-172.17.0.2-1733456897772 heartbeating to localhost/127.0.0.1:42313 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-77245198-172.17.0.2-1733456897772 (Datanode Uuid df0ec9d8-be90-4a6f-a932-8235874c1144) service to localhost/127.0.0.1:42313 2024-12-06T03:49:11,679 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:49:11,679 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/data/data3/current/BP-77245198-172.17.0.2-1733456897772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:49:11,679 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/data/data4/current/BP-77245198-172.17.0.2-1733456897772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:49:11,679 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:49:11,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25d1fa6d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:49:11,681 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54c0aeb2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:49:11,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:11,681 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:49:11,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57426144{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:49:11,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67e36879{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.log.dir/,STOPPED} 2024-12-06T03:49:11,683 WARN [BP-77245198-172.17.0.2-1733456897772 heartbeating to localhost/127.0.0.1:42313 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:49:11,683 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:49:11,683 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:49:11,683 WARN [BP-77245198-172.17.0.2-1733456897772 heartbeating to localhost/127.0.0.1:42313 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-77245198-172.17.0.2-1733456897772 (Datanode Uuid 516377b3-393b-4516-884e-64259e43aeb5) service to localhost/127.0.0.1:42313 2024-12-06T03:49:11,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/data/data1/current/BP-77245198-172.17.0.2-1733456897772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:49:11,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/cluster_982115ce-c627-04ca-804b-a75754f22ea4/data/data2/current/BP-77245198-172.17.0.2-1733456897772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:49:11,683 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:49:11,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77bac3c5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:49:11,689 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7eb75a65{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:49:11,690 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:49:11,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4179ed70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:49:11,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c49ee69{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.log.dir/,STOPPED} 2024-12-06T03:49:11,695 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T03:49:11,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T03:49:11,727 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 180) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42313 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/6f1b912b0816:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42313 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42313 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:42313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42313 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42313 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 459) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=142 (was 228), ProcessCount=11 (was 11), AvailableMemoryMB=7439 (was 7507) 2024-12-06T03:49:11,734 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=142, ProcessCount=11, AvailableMemoryMB=7440 2024-12-06T03:49:11,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T03:49:11,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.log.dir so I do NOT create it in target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074 2024-12-06T03:49:11,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/146e0cbf-1678-e1d2-2331-abc008befcd2/hadoop.tmp.dir so I do NOT create it in target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074 2024-12-06T03:49:11,734 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1, deleteOnExit=true 2024-12-06T03:49:11,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/test.cache.data in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.log.dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T03:49:11,735 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T03:49:11,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:49:11,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:49:11,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T03:49:11,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/nfs.dump.dir in system properties and HBase conf 2024-12-06T03:49:11,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/java.io.tmpdir in system properties and HBase conf 2024-12-06T03:49:11,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:49:11,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T03:49:11,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T03:49:11,747 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:49:11,953 INFO [regionserver/6f1b912b0816:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:49:12,050 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:49:12,055 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:49:12,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:49:12,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:49:12,056 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:49:12,057 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:49:12,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50c11e4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:49:12,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a75c30e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:49:12,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@549b308b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/java.io.tmpdir/jetty-localhost-36465-hadoop-hdfs-3_4_1-tests_jar-_-any-12335805180498297282/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:49:12,151 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@516aaa5c{HTTP/1.1, (http/1.1)}{localhost:36465} 2024-12-06T03:49:12,151 INFO [Time-limited test {}] server.Server(415): Started @245016ms 2024-12-06T03:49:12,162 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:49:12,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:49:12,365 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:49:12,366 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:49:12,366 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:49:12,366 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:49:12,366 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@500bd0b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:49:12,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10eef1f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:49:12,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26c07cbd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/java.io.tmpdir/jetty-localhost-39857-hadoop-hdfs-3_4_1-tests_jar-_-any-6089002615069182145/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:49:12,465 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6daf3b21{HTTP/1.1, (http/1.1)}{localhost:39857} 2024-12-06T03:49:12,465 INFO [Time-limited test {}] server.Server(415): Started @245331ms 2024-12-06T03:49:12,466 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:49:12,492 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:49:12,494 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:49:12,495 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:49:12,495 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:49:12,495 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:49:12,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@446d58fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:49:12,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c6d9015{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:49:12,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@311e029{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/java.io.tmpdir/jetty-localhost-45157-hadoop-hdfs-3_4_1-tests_jar-_-any-5425610322189130750/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:49:12,593 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78e8d841{HTTP/1.1, (http/1.1)}{localhost:45157} 2024-12-06T03:49:12,593 INFO [Time-limited test {}] server.Server(415): Started @245458ms 2024-12-06T03:49:12,594 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:49:12,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:12,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:49:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:49:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:49:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T03:49:12,976 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T03:49:13,211 WARN [Thread-1947 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/data/data1/current/BP-893510456-172.17.0.2-1733456951750/current, will proceed with Du for space computation calculation, 2024-12-06T03:49:13,211 WARN [Thread-1948 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/data/data2/current/BP-893510456-172.17.0.2-1733456951750/current, will proceed with Du for space computation calculation, 2024-12-06T03:49:13,229 WARN [Thread-1911 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:49:13,232 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x561bf97a62bf59ca with lease ID 0x6d0f4140bc2631cf: Processing first storage report for DS-56b3390f-fca3-45d5-9505-13194fa0752e from datanode DatanodeRegistration(127.0.0.1:35517, datanodeUuid=eb089f06-7085-48dd-8873-ee2654e36b3b, infoPort=35419, infoSecurePort=0, ipcPort=38565, storageInfo=lv=-57;cid=testClusterID;nsid=1748423559;c=1733456951750) 2024-12-06T03:49:13,232 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x561bf97a62bf59ca with lease ID 0x6d0f4140bc2631cf: from storage DS-56b3390f-fca3-45d5-9505-13194fa0752e node DatanodeRegistration(127.0.0.1:35517, datanodeUuid=eb089f06-7085-48dd-8873-ee2654e36b3b, infoPort=35419, infoSecurePort=0, ipcPort=38565, storageInfo=lv=-57;cid=testClusterID;nsid=1748423559;c=1733456951750), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:49:13,232 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x561bf97a62bf59ca with lease ID 0x6d0f4140bc2631cf: Processing first storage report for DS-b1510bbe-9f23-40f9-87b7-94d0bd109e4a from datanode DatanodeRegistration(127.0.0.1:35517, datanodeUuid=eb089f06-7085-48dd-8873-ee2654e36b3b, infoPort=35419, infoSecurePort=0, ipcPort=38565, storageInfo=lv=-57;cid=testClusterID;nsid=1748423559;c=1733456951750) 2024-12-06T03:49:13,232 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x561bf97a62bf59ca with lease ID 0x6d0f4140bc2631cf: from storage DS-b1510bbe-9f23-40f9-87b7-94d0bd109e4a node DatanodeRegistration(127.0.0.1:35517, datanodeUuid=eb089f06-7085-48dd-8873-ee2654e36b3b, infoPort=35419, infoSecurePort=0, ipcPort=38565, storageInfo=lv=-57;cid=testClusterID;nsid=1748423559;c=1733456951750), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:49:13,312 WARN [Thread-1959 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/data/data4/current/BP-893510456-172.17.0.2-1733456951750/current, will proceed with Du for space computation calculation, 2024-12-06T03:49:13,312 WARN [Thread-1958 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/data/data3/current/BP-893510456-172.17.0.2-1733456951750/current, will proceed with Du for space computation calculation, 2024-12-06T03:49:13,332 WARN [Thread-1934 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T03:49:13,334 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd398131d47b65c09 with lease ID 0x6d0f4140bc2631d0: Processing first storage report for DS-c32e7b9f-0f92-4880-8322-5e05e3de4cc1 from datanode DatanodeRegistration(127.0.0.1:39961, datanodeUuid=fec8d7bc-7d4a-47fc-a8d5-8a5734f3ff80, infoPort=39135, infoSecurePort=0, ipcPort=34941, storageInfo=lv=-57;cid=testClusterID;nsid=1748423559;c=1733456951750) 2024-12-06T03:49:13,334 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd398131d47b65c09 with lease ID 0x6d0f4140bc2631d0: from storage DS-c32e7b9f-0f92-4880-8322-5e05e3de4cc1 node DatanodeRegistration(127.0.0.1:39961, datanodeUuid=fec8d7bc-7d4a-47fc-a8d5-8a5734f3ff80, infoPort=39135, infoSecurePort=0, ipcPort=34941, storageInfo=lv=-57;cid=testClusterID;nsid=1748423559;c=1733456951750), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:49:13,334 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd398131d47b65c09 with lease ID 0x6d0f4140bc2631d0: Processing first storage report for DS-ec0cd4ef-25a6-4a9a-b8da-f2dff634860a from datanode DatanodeRegistration(127.0.0.1:39961, datanodeUuid=fec8d7bc-7d4a-47fc-a8d5-8a5734f3ff80, infoPort=39135, infoSecurePort=0, ipcPort=34941, storageInfo=lv=-57;cid=testClusterID;nsid=1748423559;c=1733456951750) 2024-12-06T03:49:13,334 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd398131d47b65c09 with lease ID 0x6d0f4140bc2631d0: from storage DS-ec0cd4ef-25a6-4a9a-b8da-f2dff634860a node DatanodeRegistration(127.0.0.1:39961, datanodeUuid=fec8d7bc-7d4a-47fc-a8d5-8a5734f3ff80, infoPort=39135, infoSecurePort=0, ipcPort=34941, storageInfo=lv=-57;cid=testClusterID;nsid=1748423559;c=1733456951750), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:49:13,419 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074 2024-12-06T03:49:13,422 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/zookeeper_0, clientPort=64235, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T03:49:13,422 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64235 2024-12-06T03:49:13,423 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:49:13,424 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:49:13,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:49:13,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:49:13,432 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56 with version=8 2024-12-06T03:49:13,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase-staging 2024-12-06T03:49:13,434 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:49:13,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:49:13,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:49:13,434 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:49:13,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:49:13,434 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:49:13,435 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T03:49:13,435 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:49:13,435 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33419 2024-12-06T03:49:13,436 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33419 connecting to ZooKeeper ensemble=127.0.0.1:64235 2024-12-06T03:49:13,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:334190x0, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:49:13,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33419-0x101aa0b93120000 connected 2024-12-06T03:49:13,558 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:49:13,559 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:49:13,561 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:49:13,561 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56, hbase.cluster.distributed=false 2024-12-06T03:49:13,562 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:49:13,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33419 2024-12-06T03:49:13,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33419 2024-12-06T03:49:13,563 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33419 2024-12-06T03:49:13,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33419 2024-12-06T03:49:13,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33419 2024-12-06T03:49:13,577 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:49:13,577 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:49:13,577 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:49:13,577 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:49:13,577 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:49:13,577 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:49:13,577 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T03:49:13,577 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:49:13,578 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33823 2024-12-06T03:49:13,579 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33823 connecting to ZooKeeper ensemble=127.0.0.1:64235 2024-12-06T03:49:13,579 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:49:13,581 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:49:13,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338230x0, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:49:13,591 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33823-0x101aa0b93120001 connected 2024-12-06T03:49:13,591 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:49:13,591 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T03:49:13,592 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T03:49:13,592 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T03:49:13,593 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:49:13,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33823 2024-12-06T03:49:13,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33823 2024-12-06T03:49:13,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33823 2024-12-06T03:49:13,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33823 2024-12-06T03:49:13,594 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33823 2024-12-06T03:49:13,609 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6f1b912b0816:33419 2024-12-06T03:49:13,609 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6f1b912b0816,33419,1733456953434 2024-12-06T03:49:13,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:49:13,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:49:13,616 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6f1b912b0816,33419,1733456953434 2024-12-06T03:49:13,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T03:49:13,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,624 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T03:49:13,625 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6f1b912b0816,33419,1733456953434 from backup master directory 2024-12-06T03:49:13,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:13,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:49:13,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6f1b912b0816,33419,1733456953434 2024-12-06T03:49:13,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:49:13,632 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:49:13,632 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6f1b912b0816,33419,1733456953434 2024-12-06T03:49:13,636 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/hbase.id] with ID: 2625c5df-b1c8-4c44-8233-e80f98cd518c 2024-12-06T03:49:13,636 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/.tmp/hbase.id 2024-12-06T03:49:13,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:49:13,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:49:13,642 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/.tmp/hbase.id]:[hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/hbase.id] 2024-12-06T03:49:13,653 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:49:13,653 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T03:49:13,654 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
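The Close-WAL-Writer-0 WARN above bottoms out in java.io.IOException: Filesystem closed: the reflective isFileClosed() probe inside RecoverLeaseFSUtils is hitting a DFSClient that has already been shut down, which fits the path in the message pointing at an earlier mini-cluster (port 46387, WAL timestamped 1733456817153) rather than the cluster being started here, so the warning appears to be leftover teardown noise rather than a failure of this run. For context, a minimal sketch of the plain HDFS lease-recovery loop that the utility wraps reflectively is shown below; the NameNode URI and WAL path are placeholders, not values taken from this log.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address and WAL path -- illustration only.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/example/WALs/example-wal");
    boolean closed = dfs.recoverLease(wal);   // ask the NameNode to begin lease recovery
    while (!closed) {
      Thread.sleep(1000);                     // recovery completes asynchronously; poll
      closed = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
    }
    // If the FileSystem (or the DFSClient behind it) was already closed, each of these
    // calls throws java.io.IOException: Filesystem closed -- the root cause logged above.
    dfs.close();
  }
}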
2024-12-06T03:49:13,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:49:13,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:49:13,672 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:49:13,673 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T03:49:13,673 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:49:13,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:13,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:49:13,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:49:13,689 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store 2024-12-06T03:49:13,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:49:13,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:49:13,697 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:49:13,697 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:49:13,697 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:49:13,697 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:49:13,697 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:49:13,697 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:49:13,697 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
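The master:store descriptor dumped above (families info, proc, rs and state) is created internally by MasterRegion, so nothing here is user-facing, but the family attributes it prints map directly onto the public descriptor builders. A minimal sketch, assuming a placeholder table name and reproducing only the info family from the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes logged above: 3 versions, ROW_INDEX_V1
    // encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // "example_store" is a placeholder; the real region above is the master-local
    // 'master:store' table, which HBase creates internally rather than via client code.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))
        .setColumnFamily(info)
        .build();
  }
}

The proc, rs and state families shown in the log differ only in keeping a single version, using no data-block encoding, ROW bloom filters, and 64 KB blocks.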
2024-12-06T03:49:13,697 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733456953697Disabling compacts and flushes for region at 1733456953697Disabling writes for close at 1733456953697Writing region close event to WAL at 1733456953697Closed at 1733456953697 2024-12-06T03:49:13,698 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/.initializing 2024-12-06T03:49:13,698 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/WALs/6f1b912b0816,33419,1733456953434 2024-12-06T03:49:13,700 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C33419%2C1733456953434, suffix=, logDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/WALs/6f1b912b0816,33419,1733456953434, archiveDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/oldWALs, maxLogs=10 2024-12-06T03:49:13,701 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33419%2C1733456953434.1733456953701 2024-12-06T03:49:13,705 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/WALs/6f1b912b0816,33419,1733456953434/6f1b912b0816%2C33419%2C1733456953434.1733456953701 2024-12-06T03:49:13,710 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35419:35419),(127.0.0.1/127.0.0.1:39135:39135)] 2024-12-06T03:49:13,715 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:49:13,715 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:49:13,715 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,715 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T03:49:13,717 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:13,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:13,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T03:49:13,719 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:13,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:49:13,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T03:49:13,720 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:13,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:49:13,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T03:49:13,721 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:13,722 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:49:13,722 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,722 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,723 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,724 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,724 DEBUG [master/6f1b912b0816:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,724 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T03:49:13,725 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:49:13,727 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:49:13,728 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708909, jitterRate=-0.0985766053199768}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T03:49:13,728 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733456953715Initializing all the Stores at 1733456953716 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456953716Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456953716Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456953716Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456953716Cleaning up temporary data from old regions at 1733456953724 (+8 ms)Region opened successfully at 1733456953728 (+4 ms) 2024-12-06T03:49:13,729 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T03:49:13,732 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ff5ec96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:49:13,733 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T03:49:13,733 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T03:49:13,733 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T03:49:13,733 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T03:49:13,734 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T03:49:13,734 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-06T03:49:13,734 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T03:49:13,736 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T03:49:13,737 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T03:49:13,749 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T03:49:13,749 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T03:49:13,750 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T03:49:13,757 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T03:49:13,758 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T03:49:13,758 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T03:49:13,766 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T03:49:13,766 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T03:49:13,774 DEBUG 
[master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T03:49:13,775 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T03:49:13,782 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T03:49:13,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:49:13,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:49:13,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,791 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6f1b912b0816,33419,1733456953434, sessionid=0x101aa0b93120000, setting cluster-up flag (Was=false) 2024-12-06T03:49:13,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,832 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T03:49:13,833 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,33419,1733456953434 2024-12-06T03:49:13,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:13,874 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T03:49:13,875 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,33419,1733456953434 2024-12-06T03:49:13,876 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T03:49:13,877 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T03:49:13,878 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T03:49:13,878 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T03:49:13,878 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6f1b912b0816,33419,1733456953434 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T03:49:13,879 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:49:13,879 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:49:13,879 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:49:13,879 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:49:13,880 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6f1b912b0816:0, corePoolSize=10, maxPoolSize=10 2024-12-06T03:49:13,880 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,880 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:49:13,880 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, 
maxPoolSize=1 2024-12-06T03:49:13,882 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733456983882 2024-12-06T03:49:13,882 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T03:49:13,882 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T03:49:13,882 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T03:49:13,882 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T03:49:13,882 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T03:49:13,882 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T03:49:13,882 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T03:49:13,883 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T03:49:13,883 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T03:49:13,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456953883,5,FailOnTimeoutGroup] 2024-12-06T03:49:13,883 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456953883,5,FailOnTimeoutGroup] 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,883 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,884 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:13,884 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T03:49:13,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:49:13,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:49:13,890 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T03:49:13,890 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56 2024-12-06T03:49:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:49:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:49:13,896 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(746): ClusterId : 2625c5df-b1c8-4c44-8233-e80f98cd518c 2024-12-06T03:49:13,896 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T03:49:13,907 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T03:49:13,907 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T03:49:13,916 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T03:49:13,917 DEBUG [RS:0;6f1b912b0816:33823 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c28c2f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:49:13,926 DEBUG [RS:0;6f1b912b0816:33823 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6f1b912b0816:33823 2024-12-06T03:49:13,926 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T03:49:13,926 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T03:49:13,926 DEBUG [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(832): About to register with Master. 
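The entries above come from an in-process minicluster (one master plus one regionserver) brought up by the test harness before the regionserver registers with the master. A minimal sketch of that scaffolding follows, assuming HBaseTestingUtil keeps the startMiniCluster()/shutdownMiniCluster() lifecycle of the older HBaseTestingUtility; the class name itself appears near the end of this log ("Minicluster is up").

// Sketch only: stands up the kind of in-process cluster whose startup is logged above.
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();            // ZK + HDFS + master + one regionserver, as in the log above
    try {
      // test body would exercise the cluster here
    } finally {
      util.shutdownMiniCluster();       // tears the whole minicluster down
    }
  }
}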
2024-12-06T03:49:13,927 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,33419,1733456953434 with port=33823, startcode=1733456953577 2024-12-06T03:49:13,927 DEBUG [RS:0;6f1b912b0816:33823 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T03:49:13,929 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34173, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T03:49:13,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33419 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6f1b912b0816,33823,1733456953577 2024-12-06T03:49:13,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33419 {}] master.ServerManager(517): Registering regionserver=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:13,931 DEBUG [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56 2024-12-06T03:49:13,931 DEBUG [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37527 2024-12-06T03:49:13,931 DEBUG [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T03:49:13,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:49:13,941 DEBUG [RS:0;6f1b912b0816:33823 {}] zookeeper.ZKUtil(111): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6f1b912b0816,33823,1733456953577 2024-12-06T03:49:13,941 WARN [RS:0;6f1b912b0816:33823 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:49:13,941 INFO [RS:0;6f1b912b0816:33823 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:49:13,942 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6f1b912b0816,33823,1733456953577] 2024-12-06T03:49:13,942 DEBUG [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577 2024-12-06T03:49:13,945 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T03:49:13,946 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T03:49:13,946 INFO [RS:0;6f1b912b0816:33823 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:49:13,946 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
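One of the entries above instantiates the FSHLogProvider WAL for the regionserver. The sketch below shows the configuration that normally drives that choice; the key names are standard HBase settings supplied here from memory, not read out of this log, so treat them as assumptions.

// Sketch of the configuration behind "Instantiating WALProvider of type class ...FSHLogProvider".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");   // "filesystem" selects the classic FSHLog WAL; "asyncfs" would select the async provider
    conf.setInt("hbase.regionserver.maxlogs", 32);  // matches "maxLogs=32" in the WAL configuration logged later in this run
    System.out.println(conf.get("hbase.wal.provider"));
  }
}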
2024-12-06T03:49:13,946 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T03:49:13,947 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T03:49:13,947 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,947 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,948 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:49:13,948 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:49:13,948 DEBUG [RS:0;6f1b912b0816:33823 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:49:13,948 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
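The "Chore ScheduledChore name=..., period=..., is enabled" entries above and below are emitted when a chore is handed to a ChoreService. A minimal sketch of that pattern follows, assuming the ScheduledChore(name, stopper, period) constructor and the ChoreService.scheduleChore(...)/shutdown() signatures; it mirrors the CompactionChecker chore with its 1000 ms period from the entry just above.

// Sketch only: defining and scheduling a periodic chore.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    ScheduledChore checker = new ScheduledChore("CompactionChecker", stopper, 1000) {
      @Override protected void chore() {
        // periodic work would go here
      }
    };
    service.scheduleChore(checker);   // logs "... is enabled." on the real ChoreService
    // ...
    service.shutdown();               // stops the underlying scheduler thread pool
  }
}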
2024-12-06T03:49:13,948 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,948 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,948 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,948 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,948 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33823,1733456953577-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:49:13,961 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T03:49:13,961 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33823,1733456953577-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,961 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,962 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.Replication(171): 6f1b912b0816,33823,1733456953577 started 2024-12-06T03:49:13,974 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:13,974 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1482): Serving as 6f1b912b0816,33823,1733456953577, RpcServer on 6f1b912b0816/172.17.0.2:33823, sessionid=0x101aa0b93120001 2024-12-06T03:49:13,975 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T03:49:13,975 DEBUG [RS:0;6f1b912b0816:33823 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6f1b912b0816,33823,1733456953577 2024-12-06T03:49:13,975 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,33823,1733456953577' 2024-12-06T03:49:13,975 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T03:49:13,975 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T03:49:13,975 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T03:49:13,975 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T03:49:13,976 DEBUG [RS:0;6f1b912b0816:33823 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6f1b912b0816,33823,1733456953577 2024-12-06T03:49:13,976 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,33823,1733456953577' 2024-12-06T03:49:13,976 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T03:49:13,976 DEBUG 
[RS:0;6f1b912b0816:33823 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T03:49:13,976 DEBUG [RS:0;6f1b912b0816:33823 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T03:49:13,976 INFO [RS:0;6f1b912b0816:33823 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T03:49:13,976 INFO [RS:0;6f1b912b0816:33823 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T03:49:14,078 INFO [RS:0;6f1b912b0816:33823 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C33823%2C1733456953577, suffix=, logDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577, archiveDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/oldWALs, maxLogs=32 2024-12-06T03:49:14,078 INFO [RS:0;6f1b912b0816:33823 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33823%2C1733456953577.1733456954078 2024-12-06T03:49:14,083 INFO [RS:0;6f1b912b0816:33823 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577/6f1b912b0816%2C33823%2C1733456953577.1733456954078 2024-12-06T03:49:14,083 DEBUG [RS:0;6f1b912b0816:33823 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35419:35419),(127.0.0.1/127.0.0.1:39135:39135)] 2024-12-06T03:49:14,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:49:14,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:49:14,298 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:49:14,298 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:14,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:49:14,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:49:14,300 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:14,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:49:14,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:49:14,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:14,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:49:14,302 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:49:14,302 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:14,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:49:14,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740 2024-12-06T03:49:14,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740 2024-12-06T03:49:14,305 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:49:14,305 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:49:14,305 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
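The CompactionConfiguration dumps above list the effective compaction parameters for each column family of hbase:meta. The sketch below maps the logged values to the usual HBase configuration keys; the key names are recalled standard settings, not taken from this log, so treat them as assumptions.

// Sketch mapping the logged CompactionConfiguration values to configuration keys.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);           // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio 5.000000
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}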
2024-12-06T03:49:14,306 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:49:14,308 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:49:14,309 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871508, jitterRate=0.10818064212799072}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:49:14,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733456954296Initializing all the Stores at 1733456954297 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456954297Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456954297Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456954297Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456954297Cleaning up temporary data from old regions at 1733456954305 (+8 ms)Region opened successfully at 1733456954310 (+5 ms) 2024-12-06T03:49:14,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:49:14,310 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:49:14,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:49:14,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:49:14,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:49:14,310 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:49:14,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733456954310Disabling compacts and flushes for region at 1733456954310Disabling writes for close at 1733456954310Writing region close 
event to WAL at 1733456954310Closed at 1733456954310 2024-12-06T03:49:14,312 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:49:14,312 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T03:49:14,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T03:49:14,313 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:49:14,314 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T03:49:14,465 DEBUG [6f1b912b0816:33419 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T03:49:14,465 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:14,466 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,33823,1733456953577, state=OPENING 2024-12-06T03:49:14,524 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T03:49:14,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:14,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:49:14,533 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:49:14,533 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:49:14,533 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:49:14,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,33823,1733456953577}] 2024-12-06T03:49:14,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:14,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:14,686 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T03:49:14,687 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49769, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T03:49:14,691 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T03:49:14,691 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:49:14,692 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C33823%2C1733456953577.meta, suffix=.meta, logDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577, archiveDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/oldWALs, maxLogs=32 2024-12-06T03:49:14,693 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33823%2C1733456953577.meta.1733456954693.meta 2024-12-06T03:49:14,702 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577/6f1b912b0816%2C33823%2C1733456953577.meta.1733456954693.meta 2024-12-06T03:49:14,707 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39135:39135),(127.0.0.1/127.0.0.1:35419:35419)] 2024-12-06T03:49:14,711 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:49:14,711 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T03:49:14,711 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T03:49:14,711 
INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T03:49:14,711 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T03:49:14,712 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:49:14,712 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T03:49:14,712 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T03:49:14,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:49:14,718 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:49:14,718 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,719 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:14,719 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:49:14,719 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:49:14,719 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:14,720 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:49:14,721 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:49:14,721 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,721 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:14,721 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:49:14,721 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:49:14,722 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,722 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:49:14,722 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:49:14,723 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740 2024-12-06T03:49:14,723 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740 2024-12-06T03:49:14,724 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:49:14,724 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:49:14,725 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:49:14,726 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:49:14,726 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804123, jitterRate=0.022495418787002563}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:49:14,726 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T03:49:14,727 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733456954712Writing region info on filesystem at 1733456954712Initializing all the Stores at 1733456954713 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456954713Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456954717 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456954717Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733456954717Cleaning up temporary data from old regions at 1733456954724 (+7 ms)Running coprocessor post-open hooks at 1733456954726 (+2 ms)Region opened successfully at 1733456954727 (+1 ms) 2024-12-06T03:49:14,728 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733456954686 2024-12-06T03:49:14,730 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T03:49:14,730 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T03:49:14,731 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:14,732 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,33823,1733456953577, state=OPEN 2024-12-06T03:49:14,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:49:14,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:49:14,770 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:14,770 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:49:14,770 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:49:14,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T03:49:14,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,33823,1733456953577 in 237 msec 2024-12-06T03:49:14,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T03:49:14,775 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 461 msec 2024-12-06T03:49:14,775 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:49:14,775 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T03:49:14,777 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:49:14,777 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,33823,1733456953577, seqNum=-1] 2024-12-06T03:49:14,777 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:49:14,778 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47401, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:49:14,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 906 msec 2024-12-06T03:49:14,784 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733456954784, completionTime=-1 2024-12-06T03:49:14,784 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T03:49:14,784 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733457014786 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733457074786 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33419,1733456953434-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33419,1733456953434-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33419,1733456953434-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
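The entries above show the master fetching the hbase:meta region location from the connection registry once InitMetaProcedure finishes. The equivalent lookup through the public client API is sketched below; this is a hypothetical snippet for illustration, not code from the test itself.

// Sketch only: client-side lookup of the meta region location.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
      // Prints the hosting server, e.g. "6f1b912b0816,33823,1733456953577" as in the log entry above.
      System.out.println(loc.getServerName());
    }
  }
}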
2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6f1b912b0816:33419, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:14,786 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T03:49:14,788 DEBUG [master/6f1b912b0816:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T03:49:14,789 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.156sec 2024-12-06T03:49:14,789 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T03:49:14,789 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T03:49:14,789 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T03:49:14,789 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T03:49:14,789 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T03:49:14,789 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33419,1733456953434-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:49:14,790 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33419,1733456953434-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T03:49:14,792 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T03:49:14,792 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T03:49:14,792 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,33419,1733456953434-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
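The entries that follow show the test client connecting to the now-initialized master and asking it to create 'TestLogRolling-testLogRolling' with a deliberately tiny max file size (786432) and memstore flush size (8192), which triggers the two TableDescriptorChecker warnings below. A hedged sketch of client calls that would produce such a request is given here; the actual test code is not shown in this log.

// Sketch only: creating the small-threshold test table through the client API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432L)        // per-table override of "hbase.hregion.max.filesize", as warned about below
          .setMemStoreFlushSize(8192L)    // per-table override of "hbase.hregion.memstore.flush.size", as warned about below
          .build();
      admin.createTable(desc);
    }
  }
}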
2024-12-06T03:49:14,796 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@583c44b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:49:14,796 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6f1b912b0816,33419,-1 for getting cluster id 2024-12-06T03:49:14,797 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T03:49:14,798 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2625c5df-b1c8-4c44-8233-e80f98cd518c' 2024-12-06T03:49:14,798 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T03:49:14,798 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2625c5df-b1c8-4c44-8233-e80f98cd518c" 2024-12-06T03:49:14,798 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22b40858, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:49:14,798 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6f1b912b0816,33419,-1] 2024-12-06T03:49:14,798 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T03:49:14,798 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:49:14,799 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52898, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-06T03:49:14,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24ddf4bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:49:14,800 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:49:14,801 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,33823,1733456953577, seqNum=-1] 2024-12-06T03:49:14,801 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:49:14,802 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:49:14,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6f1b912b0816,33419,1733456953434 2024-12-06T03:49:14,804 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:49:14,806 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-06T03:49:14,806 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-06T03:49:14,807 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 6f1b912b0816,33419,1733456953434 2024-12-06T03:49:14,807 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@17c909cf 2024-12-06T03:49:14,807 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T03:49:14,808 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T03:49:14,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33419 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T03:49:14,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33419 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-06T03:49:14,809 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33419 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:49:14,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33419 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-06T03:49:14,811 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T03:49:14,811 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:14,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33419 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-06T03:49:14,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T03:49:14,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-06T03:49:14,818 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741835_1011 (size=381) 2024-12-06T03:49:14,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741835_1011 (size=381) 2024-12-06T03:49:14,820 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6737d9a3b3f50844292ae9b1e1e8e03d, NAME => 'TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56 2024-12-06T03:49:14,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741836_1012 (size=64) 2024-12-06T03:49:14,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741836_1012 (size=64) 2024-12-06T03:49:14,826 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:49:14,826 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 6737d9a3b3f50844292ae9b1e1e8e03d, disabling compactions & flushes 2024-12-06T03:49:14,826 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:14,826 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:14,826 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. after waiting 0 ms 2024-12-06T03:49:14,826 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:14,826 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 
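The CreateTableProcedure and RegionOpenAndInit records around this point correspond to a single client-side create-table request (logged above as HMaster$4(2454): Client=jenkins ... create 'TestLogRolling-testLogRolling', ...). The sketch below shows how such a request is issued through the Admin API; the table name, family name, VERSIONS setting, and the deliberately small file-size/flush-size values are copied from the log, while the connection setup and class name are assumptions, not the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          // Values this small (786432 bytes max file size, 8192 bytes flush size),
          // whether set here or via hbase.hregion.max.filesize /
          // hbase.hregion.memstore.flush.size in the cluster conf, produce the
          // TableDescriptorChecker WARNs about over-splitting and frequent flushing.
          .setMaxFileSize(786432L)
          .setMemStoreFlushSize(8192L)
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build())
          .build();
      // Submits the request to the master, which runs it as a CreateTableProcedure
      // (pid=4 in this log) with ASSIGN/OpenRegion subprocedures.
      admin.createTable(desc);
    }
  }
}
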
2024-12-06T03:49:14,826 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6737d9a3b3f50844292ae9b1e1e8e03d: Waiting for close lock at 1733456954826Disabling compacts and flushes for region at 1733456954826Disabling writes for close at 1733456954826Writing region close event to WAL at 1733456954826Closed at 1733456954826 2024-12-06T03:49:14,827 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T03:49:14,828 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733456954827"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733456954827"}]},"ts":"1733456954827"} 2024-12-06T03:49:14,830 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-06T03:49:14,831 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T03:49:14,831 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456954831"}]},"ts":"1733456954831"} 2024-12-06T03:49:14,833 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-06T03:49:14,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, ASSIGN}] 2024-12-06T03:49:14,834 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, ASSIGN 2024-12-06T03:49:14,835 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, ASSIGN; state=OFFLINE, location=6f1b912b0816,33823,1733456953577; forceNewPlan=false, retain=false 2024-12-06T03:49:14,986 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6737d9a3b3f50844292ae9b1e1e8e03d, regionState=OPENING, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:14,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, ASSIGN because future has completed 2024-12-06T03:49:14,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6737d9a3b3f50844292ae9b1e1e8e03d, 
server=6f1b912b0816,33823,1733456953577}] 2024-12-06T03:49:15,145 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:15,145 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6737d9a3b3f50844292ae9b1e1e8e03d, NAME => 'TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:49:15,145 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,145 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:49:15,145 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,145 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,146 INFO [StoreOpener-6737d9a3b3f50844292ae9b1e1e8e03d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,148 INFO [StoreOpener-6737d9a3b3f50844292ae9b1e1e8e03d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6737d9a3b3f50844292ae9b1e1e8e03d columnFamilyName info 2024-12-06T03:49:15,148 DEBUG [StoreOpener-6737d9a3b3f50844292ae9b1e1e8e03d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:15,148 INFO [StoreOpener-6737d9a3b3f50844292ae9b1e1e8e03d-1 {}] regionserver.HStore(327): Store=6737d9a3b3f50844292ae9b1e1e8e03d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:49:15,148 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,149 DEBUG 
[RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,149 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,149 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,149 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,151 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,152 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:49:15,153 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6737d9a3b3f50844292ae9b1e1e8e03d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878647, jitterRate=0.1172577440738678}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T03:49:15,153 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:15,153 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6737d9a3b3f50844292ae9b1e1e8e03d: Running coprocessor pre-open hook at 1733456955145Writing region info on filesystem at 1733456955145Initializing all the Stores at 1733456955146 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456955146Cleaning up temporary data from old regions at 1733456955149 (+3 ms)Running coprocessor post-open hooks at 1733456955153 (+4 ms)Region opened successfully at 1733456955153 2024-12-06T03:49:15,154 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., pid=6, masterSystemTime=1733456955141 2024-12-06T03:49:15,156 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:15,156 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:15,156 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6737d9a3b3f50844292ae9b1e1e8e03d, regionState=OPEN, openSeqNum=2, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:15,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6737d9a3b3f50844292ae9b1e1e8e03d, server=6f1b912b0816,33823,1733456953577 because future has completed 2024-12-06T03:49:15,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T03:49:15,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6737d9a3b3f50844292ae9b1e1e8e03d, server=6f1b912b0816,33823,1733456953577 in 170 msec 2024-12-06T03:49:15,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T03:49:15,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, ASSIGN in 328 msec 2024-12-06T03:49:15,163 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T03:49:15,163 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733456955163"}]},"ts":"1733456955163"} 2024-12-06T03:49:15,165 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-06T03:49:15,166 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T03:49:15,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 357 msec 2024-12-06T03:49:15,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:15,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:16,197 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,197 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,217 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:16,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:16,722 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T03:49:16,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:16,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T03:49:17,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:17,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:49:18,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:18,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:19,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:49:19,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:19,945 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T03:49:19,945 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-06T03:49:20,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-06T03:49:22,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-06T03:49:22,975 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-06T03:49:22,977 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-06T03:49:22,977 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-06T03:49:22,977 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-06T03:49:22,977 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-06T03:49:22,979 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-06T03:49:22,979 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-06T03:49:24,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33419 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-06T03:49:24,823 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-12-06T03:49:24,823 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-12-06T03:49:24,829 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-12-06T03:49:24,829 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.
2024-12-06T03:49:24,835 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2]
2024-12-06T03:49:24,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 6737d9a3b3f50844292ae9b1e1e8e03d
2024-12-06T03:49:24,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6737d9a3b3f50844292ae9b1e1e8e03d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-06T03:49:24,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/a734bc503a714eccbc53ddd977fa8f65 is 1080, key is row0001/info:/1733456964836/Put/seqid=0
2024-12-06T03:49:24,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741837_1013 (size=12509)
2024-12-06T03:49:24,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741837_1013 (size=12509)
2024-12-06T03:49:24,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=6737d9a3b3f50844292ae9b1e1e8e03d, server=6f1b912b0816,33823,1733456953577
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-12-06T03:49:24,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47108 deadline: 1733456974889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=6737d9a3b3f50844292ae9b1e1e8e03d, server=6f1b912b0816,33823,1733456953577
2024-12-06T03:49:24,894 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2, the old value is region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=6737d9a3b3f50844292ae9b1e1e8e03d, server=6f1b912b0816,33823,1733456953577
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-06T03:49:24,894 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=6737d9a3b3f50844292ae9b1e1e8e03d, server=6f1b912b0816,33823,1733456953577
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-06T03:49:24,894 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2 because the exception is null or not the one we care about
2024-12-06T03:49:25,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/a734bc503a714eccbc53ddd977fa8f65
2024-12-06T03:49:25,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/a734bc503a714eccbc53ddd977fa8f65 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/a734bc503a714eccbc53ddd977fa8f65
2024-12-06T03:49:25,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/a734bc503a714eccbc53ddd977fa8f65, entries=7, sequenceid=11, filesize=12.2 K
2024-12-06T03:49:25,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 6737d9a3b3f50844292ae9b1e1e8e03d in 442ms, sequenceid=11, compaction requested=false
2024-12-06T03:49:25,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6737d9a3b3f50844292ae9b1e1e8e03d:
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:32,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:32,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:33,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:33,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:34,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:34,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:34,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6737d9a3b3f50844292ae9b1e1e8e03d 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-06T03:49:34,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/6a07397a4bb0458da1e55ace880baf64 is 1080, key is row0008/info:/1733456964851/Put/seqid=0 2024-12-06T03:49:34,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741838_1014 (size=29761) 2024-12-06T03:49:34,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741838_1014 (size=29761) 2024-12-06T03:49:34,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/6a07397a4bb0458da1e55ace880baf64 2024-12-06T03:49:34,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/6a07397a4bb0458da1e55ace880baf64 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6a07397a4bb0458da1e55ace880baf64 2024-12-06T03:49:34,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6a07397a4bb0458da1e55ace880baf64, entries=23, sequenceid=37, filesize=29.1 K 2024-12-06T03:49:34,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 6737d9a3b3f50844292ae9b1e1e8e03d in 23ms, sequenceid=37, compaction requested=false 2024-12-06T03:49:34,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6737d9a3b3f50844292ae9b1e1e8e03d: 2024-12-06T03:49:34,985 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-06T03:49:34,985 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:34,985 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6a07397a4bb0458da1e55ace880baf64 because midkey is the same as first or last row 2024-12-06T03:49:35,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:35,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:36,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:36,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:49:36,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:36,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6737d9a3b3f50844292ae9b1e1e8e03d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:49:36,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/c578ce4be8a244e5b04b2cbe62c74edd is 1080, key is row0031/info:/1733456974964/Put/seqid=0 2024-12-06T03:49:36,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741839_1015 (size=12509) 2024-12-06T03:49:36,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741839_1015 (size=12509) 2024-12-06T03:49:36,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/c578ce4be8a244e5b04b2cbe62c74edd 2024-12-06T03:49:36,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/c578ce4be8a244e5b04b2cbe62c74edd as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c578ce4be8a244e5b04b2cbe62c74edd 2024-12-06T03:49:36,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c578ce4be8a244e5b04b2cbe62c74edd, entries=7, sequenceid=47, filesize=12.2 K 2024-12-06T03:49:37,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 6737d9a3b3f50844292ae9b1e1e8e03d in 24ms, sequenceid=47, compaction requested=true 2024-12-06T03:49:37,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6737d9a3b3f50844292ae9b1e1e8e03d: 2024-12-06T03:49:37,000 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-06T03:49:37,001 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:37,001 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6a07397a4bb0458da1e55ace880baf64 because midkey is the same as first or last row 2024-12-06T03:49:37,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6737d9a3b3f50844292ae9b1e1e8e03d:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-06T03:49:37,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:37,001 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:49:37,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:37,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6737d9a3b3f50844292ae9b1e1e8e03d 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-06T03:49:37,002 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:49:37,003 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 6737d9a3b3f50844292ae9b1e1e8e03d/info is initiating minor compaction (all files) 2024-12-06T03:49:37,003 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6737d9a3b3f50844292ae9b1e1e8e03d/info in TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:37,003 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/a734bc503a714eccbc53ddd977fa8f65, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6a07397a4bb0458da1e55ace880baf64, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c578ce4be8a244e5b04b2cbe62c74edd] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp, totalSize=53.5 K 2024-12-06T03:49:37,003 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting a734bc503a714eccbc53ddd977fa8f65, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733456964836 2024-12-06T03:49:37,004 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a07397a4bb0458da1e55ace880baf64, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733456964851 2024-12-06T03:49:37,004 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting c578ce4be8a244e5b04b2cbe62c74edd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733456974964 2024-12-06T03:49:37,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/c69798c54c2e45cb9194d369ca0cde40 is 1080, key is row0038/info:/1733456976977/Put/seqid=0 
2024-12-06T03:49:37,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741840_1016 (size=18987) 2024-12-06T03:49:37,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741840_1016 (size=18987) 2024-12-06T03:49:37,019 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6737d9a3b3f50844292ae9b1e1e8e03d#info#compaction#58 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:37,020 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/24d92ee1c4b74915b92ef040d29aeec2 is 1080, key is row0001/info:/1733456964836/Put/seqid=0 2024-12-06T03:49:37,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741841_1017 (size=44978) 2024-12-06T03:49:37,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741841_1017 (size=44978) 2024-12-06T03:49:37,032 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/24d92ee1c4b74915b92ef040d29aeec2 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 2024-12-06T03:49:37,038 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6737d9a3b3f50844292ae9b1e1e8e03d/info of 6737d9a3b3f50844292ae9b1e1e8e03d into 24d92ee1c4b74915b92ef040d29aeec2(size=43.9 K), total size for store is 43.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T03:49:37,038 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6737d9a3b3f50844292ae9b1e1e8e03d: 2024-12-06T03:49:37,038 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., storeName=6737d9a3b3f50844292ae9b1e1e8e03d/info, priority=13, startTime=1733456977001; duration=0sec 2024-12-06T03:49:37,038 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-12-06T03:49:37,038 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:37,038 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 because midkey is the same as first or last row 2024-12-06T03:49:37,039 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-12-06T03:49:37,039 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:37,039 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 because midkey is the same as first or last row 2024-12-06T03:49:37,039 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=43.9 K, sizeToCheck=16.0 K 2024-12-06T03:49:37,039 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:37,039 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 because midkey is the same as first or last row 2024-12-06T03:49:37,039 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:37,039 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6737d9a3b3f50844292ae9b1e1e8e03d:info 2024-12-06T03:49:37,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/c69798c54c2e45cb9194d369ca0cde40 2024-12-06T03:49:37,422 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/c69798c54c2e45cb9194d369ca0cde40 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c69798c54c2e45cb9194d369ca0cde40 2024-12-06T03:49:37,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c69798c54c2e45cb9194d369ca0cde40, entries=13, sequenceid=63, filesize=18.5 K 2024-12-06T03:49:37,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=14.71 KB/15064 for 6737d9a3b3f50844292ae9b1e1e8e03d in 426ms, sequenceid=63, compaction requested=false 2024-12-06T03:49:37,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6737d9a3b3f50844292ae9b1e1e8e03d: 2024-12-06T03:49:37,428 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K 2024-12-06T03:49:37,428 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:37,428 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 because midkey is the same as first or last row 2024-12-06T03:49:37,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:37,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:38,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:38,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:39,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6737d9a3b3f50844292ae9b1e1e8e03d 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-06T03:49:39,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/6c4ad210bc57467cad7f8262749d975e is 1080, key is row0051/info:/1733456977003/Put/seqid=0 2024-12-06T03:49:39,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741842_1018 (size=21141) 2024-12-06T03:49:39,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741842_1018 (size=21141) 2024-12-06T03:49:39,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/6c4ad210bc57467cad7f8262749d975e 2024-12-06T03:49:39,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/6c4ad210bc57467cad7f8262749d975e as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6c4ad210bc57467cad7f8262749d975e 2024-12-06T03:49:39,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6c4ad210bc57467cad7f8262749d975e, entries=15, sequenceid=82, filesize=20.6 K 2024-12-06T03:49:39,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 6737d9a3b3f50844292ae9b1e1e8e03d in 27ms, sequenceid=82, compaction requested=true 2024-12-06T03:49:39,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6737d9a3b3f50844292ae9b1e1e8e03d: 2024-12-06T03:49:39,060 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-06T03:49:39,060 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:39,060 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 because midkey is the same as first or last row 2024-12-06T03:49:39,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6737d9a3b3f50844292ae9b1e1e8e03d:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:49:39,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,060 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:49:39,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6737d9a3b3f50844292ae9b1e1e8e03d 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-06T03:49:39,062 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:49:39,062 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 6737d9a3b3f50844292ae9b1e1e8e03d/info is initiating minor compaction (all files) 2024-12-06T03:49:39,062 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6737d9a3b3f50844292ae9b1e1e8e03d/info in TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 
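[editorial note] The ConstantSizeRegionSplitPolicy and StoreUtils records a few lines up show the split decision: the summed store size (83.1 K) exceeds the check size (16.0 K), but the split is vetoed when the candidate midkey equals the first or last row. The following is a minimal, self-contained Java sketch of that two-part check; the class name, helper names, threshold, and row values are illustrative assumptions echoing the log, not HBase's actual implementation.

import java.util.Arrays;
import java.util.List;

// Illustrative sketch of the split check logged above: split only when the
// total store size exceeds a threshold AND a usable midkey exists.
// Names, sizes and the 16 KB threshold are assumptions drawn from the log, not HBase code.
public class SplitCheckSketch {

    static boolean shouldSplit(List<Long> storeFileSizes, long sizeToCheckBytes) {
        long sumSize = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        return sumSize > sizeToCheckBytes;              // "region size is big enough"
    }

    static boolean canSplitAt(byte[] midkey, byte[] firstRow, byte[] lastRow) {
        // "cannot split ... because midkey is the same as first or last row"
        return !(Arrays.equals(midkey, firstRow) || Arrays.equals(midkey, lastRow));
    }

    public static void main(String[] args) {
        // Values loosely echo the log: roughly 83 K of store files vs. a 16.0 K check size.
        List<Long> sizes = Arrays.asList(44_954L, 18_946L, 21_141L);
        long sizeToCheck = 16 * 1024L;
        System.out.println("should split: " + shouldSplit(sizes, sizeToCheck));
        System.out.println("can split at midkey: "
            + canSplitAt("row0051".getBytes(), "row0001".getBytes(), "row0051".getBytes()));
    }
}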
2024-12-06T03:49:39,062 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c69798c54c2e45cb9194d369ca0cde40, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6c4ad210bc57467cad7f8262749d975e] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp, totalSize=83.1 K 2024-12-06T03:49:39,062 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 24d92ee1c4b74915b92ef040d29aeec2, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733456964836 2024-12-06T03:49:39,063 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting c69798c54c2e45cb9194d369ca0cde40, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1733456976977 2024-12-06T03:49:39,063 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c4ad210bc57467cad7f8262749d975e, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733456977003 2024-12-06T03:49:39,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/f349df77c76c4697be056fa5e9133583 is 1080, key is row0066/info:/1733456979035/Put/seqid=0 2024-12-06T03:49:39,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741843_1019 (size=17894) 2024-12-06T03:49:39,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741843_1019 (size=17894) 2024-12-06T03:49:39,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/f349df77c76c4697be056fa5e9133583 2024-12-06T03:49:39,082 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6737d9a3b3f50844292ae9b1e1e8e03d#info#compaction#61 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:39,083 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/9e8300e342d1471694c32f5e1cb49c38 is 1080, key is row0001/info:/1733456964836/Put/seqid=0 2024-12-06T03:49:39,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/f349df77c76c4697be056fa5e9133583 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/f349df77c76c4697be056fa5e9133583 2024-12-06T03:49:39,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/f349df77c76c4697be056fa5e9133583, entries=12, sequenceid=97, filesize=17.5 K 2024-12-06T03:49:39,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for 6737d9a3b3f50844292ae9b1e1e8e03d in 34ms, sequenceid=97, compaction requested=false 2024-12-06T03:49:39,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6737d9a3b3f50844292ae9b1e1e8e03d: 2024-12-06T03:49:39,094 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=100.6 K, sizeToCheck=16.0 K 2024-12-06T03:49:39,094 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:39,094 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 because midkey is the same as first or last row 2024-12-06T03:49:39,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741844_1020 (size=75378) 2024-12-06T03:49:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6737d9a3b3f50844292ae9b1e1e8e03d 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-06T03:49:39,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741844_1020 (size=75378) 2024-12-06T03:49:39,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/de2e2723042b4290877cb982e6947d98 is 1080, key is row0078/info:/1733456979062/Put/seqid=0 2024-12-06T03:49:39,105 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/9e8300e342d1471694c32f5e1cb49c38 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38 2024-12-06T03:49:39,111 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6737d9a3b3f50844292ae9b1e1e8e03d/info of 6737d9a3b3f50844292ae9b1e1e8e03d into 9e8300e342d1471694c32f5e1cb49c38(size=73.6 K), total size for store is 91.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T03:49:39,111 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6737d9a3b3f50844292ae9b1e1e8e03d: 2024-12-06T03:49:39,111 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., storeName=6737d9a3b3f50844292ae9b1e1e8e03d/info, priority=13, startTime=1733456979060; duration=0sec 2024-12-06T03:49:39,111 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-12-06T03:49:39,111 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:39,111 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-12-06T03:49:39,111 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:39,111 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-12-06T03:49:39,111 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:39,118 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:39,118 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:39,118 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6737d9a3b3f50844292ae9b1e1e8e03d:info 2024-12-06T03:49:39,120 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33419 {}] assignment.AssignmentManager(1363): Split request from 6f1b912b0816,33823,1733456953577, parent={ENCODED => 6737d9a3b3f50844292ae9b1e1e8e03d, NAME => 'TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-06T03:49:39,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35517 is added to blk_1073741845_1021 (size=22222) 2024-12-06T03:49:39,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741845_1021 (size=22222) 2024-12-06T03:49:39,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/de2e2723042b4290877cb982e6947d98 2024-12-06T03:49:39,125 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33419 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:39,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/de2e2723042b4290877cb982e6947d98 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/de2e2723042b4290877cb982e6947d98 2024-12-06T03:49:39,130 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33419 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=6737d9a3b3f50844292ae9b1e1e8e03d, daughterA=64e8b1893dc9f355346e3f21b27481c3, daughterB=9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,132 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=6737d9a3b3f50844292ae9b1e1e8e03d, daughterA=64e8b1893dc9f355346e3f21b27481c3, daughterB=9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,132 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=6737d9a3b3f50844292ae9b1e1e8e03d, daughterA=64e8b1893dc9f355346e3f21b27481c3, daughterB=9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,132 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=6737d9a3b3f50844292ae9b1e1e8e03d, daughterA=64e8b1893dc9f355346e3f21b27481c3, daughterB=9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/de2e2723042b4290877cb982e6947d98, entries=16, sequenceid=116, filesize=21.7 K 2024-12-06T03:49:39,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=3.15 KB/3228 for 6737d9a3b3f50844292ae9b1e1e8e03d in 39ms, sequenceid=116, compaction requested=true 2024-12-06T03:49:39,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6737d9a3b3f50844292ae9b1e1e8e03d: 
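[editorial note] The flush summaries above report sizes twice, e.g. "~16.81 KB/17216" and "~18.23 KB/18672": the rounded figure is simply the raw byte count divided by 1024, shown to two decimals. A tiny Java sketch of that arithmetic follows; the formatter and method name are an illustration only, not the code HBase uses to render these strings.

// Illustration of the "~16.81 KB/17216" style figures in the flush records above:
// the KB value is just bytes / 1024 rounded to two decimals.
public class SizeFormatSketch {
    static String approxKb(long bytes) {
        return String.format("~%.2f KB/%d", bytes / 1024.0, bytes);
    }
    public static void main(String[] args) {
        System.out.println(approxKb(17216)); // prints ~16.81 KB/17216
        System.out.println(approxKb(18672)); // prints ~18.23 KB/18672
    }
}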
2024-12-06T03:49:39,136 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-12-06T03:49:39,136 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:39,136 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-12-06T03:49:39,136 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:39,136 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-12-06T03:49:39,136 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T03:49:39,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:39,138 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33419 {}] assignment.AssignmentManager(1363): Split request from 6f1b912b0816,33823,1733456953577, parent={ENCODED => 6737d9a3b3f50844292ae9b1e1e8e03d, NAME => 'TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-06T03:49:39,138 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33419 {}] assignment.AssignmentManager(1378): Ignoring split request from 6f1b912b0816,33823,1733456953577, parent={ENCODED => 6737d9a3b3f50844292ae9b1e1e8e03d, NAME => 'TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-12-06T03:49:39,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, UNASSIGN}] 2024-12-06T03:49:39,140 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, UNASSIGN 2024-12-06T03:49:39,142 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=6737d9a3b3f50844292ae9b1e1e8e03d, regionState=CLOSING, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:39,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, UNASSIGN because future has completed 2024-12-06T03:49:39,144 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-06T03:49:39,144 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6737d9a3b3f50844292ae9b1e1e8e03d, 
server=6f1b912b0816,33823,1733456953577}] 2024-12-06T03:49:39,303 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,303 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-06T03:49:39,304 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 6737d9a3b3f50844292ae9b1e1e8e03d, disabling compactions & flushes 2024-12-06T03:49:39,304 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:39,304 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:39,304 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. after waiting 0 ms 2024-12-06T03:49:39,305 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:39,305 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 6737d9a3b3f50844292ae9b1e1e8e03d 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-06T03:49:39,313 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/2fb1865141aa4862a15a1d091b9ab214 is 1080, key is row0094/info:/1733456979098/Put/seqid=0 2024-12-06T03:49:39,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741846_1022 (size=8193) 2024-12-06T03:49:39,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741846_1022 (size=8193) 2024-12-06T03:49:39,319 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/2fb1865141aa4862a15a1d091b9ab214 2024-12-06T03:49:39,326 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/.tmp/info/2fb1865141aa4862a15a1d091b9ab214 as 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/2fb1865141aa4862a15a1d091b9ab214 2024-12-06T03:49:39,333 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/2fb1865141aa4862a15a1d091b9ab214, entries=3, sequenceid=123, filesize=8.0 K 2024-12-06T03:49:39,334 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6737d9a3b3f50844292ae9b1e1e8e03d in 29ms, sequenceid=123, compaction requested=true 2024-12-06T03:49:39,336 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/a734bc503a714eccbc53ddd977fa8f65, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6a07397a4bb0458da1e55ace880baf64, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c578ce4be8a244e5b04b2cbe62c74edd, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c69798c54c2e45cb9194d369ca0cde40, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6c4ad210bc57467cad7f8262749d975e] to archive 2024-12-06T03:49:39,337 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
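[editorial note] The StoreCloser record above lists compacted store files about to be moved out of the region's data directory into a parallel archive tree, and the "Archived from ... to ..." records that follow show the mapping: the same table/region/family layout is reproduced under archive/ instead of data/. A self-contained sketch of that path mapping is below; the helper name and the use of java.nio.file are illustrative, since the real HFileArchiver operates on Hadoop Paths.

import java.nio.file.Path;
import java.nio.file.Paths;

// Sketch of the data-to-archive path mapping visible in the HFileArchiver records:
//   <root>/data/default/<table>/<region>/<cf>/<hfile>
//     -> <root>/archive/data/default/<table>/<region>/<cf>/<hfile>
// Helper name is hypothetical; HBase's archiver API is not reproduced here.
public class ArchivePathSketch {

    static Path toArchivePath(Path root, Path storeFile) {
        Path relative = root.relativize(storeFile);        // data/default/<table>/<region>/<cf>/<file>
        return root.resolve("archive").resolve(relative);  // archive/data/default/...
    }

    public static void main(String[] args) {
        Path root = Paths.get("/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56");
        Path storeFile = root.resolve(
            "data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/a734bc503a714eccbc53ddd977fa8f65");
        System.out.println(toArchivePath(root, storeFile));
    }
}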
2024-12-06T03:49:39,339 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/a734bc503a714eccbc53ddd977fa8f65 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/a734bc503a714eccbc53ddd977fa8f65 2024-12-06T03:49:39,340 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6a07397a4bb0458da1e55ace880baf64 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6a07397a4bb0458da1e55ace880baf64 2024-12-06T03:49:39,341 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/24d92ee1c4b74915b92ef040d29aeec2 2024-12-06T03:49:39,342 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c578ce4be8a244e5b04b2cbe62c74edd to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c578ce4be8a244e5b04b2cbe62c74edd 2024-12-06T03:49:39,344 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c69798c54c2e45cb9194d369ca0cde40 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/c69798c54c2e45cb9194d369ca0cde40 2024-12-06T03:49:39,345 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6c4ad210bc57467cad7f8262749d975e to 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/6c4ad210bc57467cad7f8262749d975e 2024-12-06T03:49:39,351 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-12-06T03:49:39,351 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 2024-12-06T03:49:39,351 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 6737d9a3b3f50844292ae9b1e1e8e03d: Waiting for close lock at 1733456979304Running coprocessor pre-close hooks at 1733456979304Disabling compacts and flushes for region at 1733456979304Disabling writes for close at 1733456979304Obtaining lock to block concurrent updates at 1733456979305 (+1 ms)Preparing flush snapshotting stores in 6737d9a3b3f50844292ae9b1e1e8e03d at 1733456979305Finished memstore snapshotting TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733456979305Flushing stores of TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. at 1733456979306 (+1 ms)Flushing 6737d9a3b3f50844292ae9b1e1e8e03d/info: creating writer at 1733456979307 (+1 ms)Flushing 6737d9a3b3f50844292ae9b1e1e8e03d/info: appending metadata at 1733456979312 (+5 ms)Flushing 6737d9a3b3f50844292ae9b1e1e8e03d/info: closing flushed file at 1733456979312Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@794400bc: reopening flushed file at 1733456979325 (+13 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6737d9a3b3f50844292ae9b1e1e8e03d in 29ms, sequenceid=123, compaction requested=true at 1733456979334 (+9 ms)Writing region close event to WAL at 1733456979347 (+13 ms)Running coprocessor post-close hooks at 1733456979351 (+4 ms)Closed at 1733456979351 2024-12-06T03:49:39,353 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,354 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=6737d9a3b3f50844292ae9b1e1e8e03d, regionState=CLOSED 2024-12-06T03:49:39,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6737d9a3b3f50844292ae9b1e1e8e03d, server=6f1b912b0816,33823,1733456953577 because future has completed 2024-12-06T03:49:39,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-06T03:49:39,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 6737d9a3b3f50844292ae9b1e1e8e03d, server=6f1b912b0816,33823,1733456953577 in 213 msec 2024-12-06T03:49:39,362 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-06T03:49:39,362 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6737d9a3b3f50844292ae9b1e1e8e03d, UNASSIGN in 220 msec 2024-12-06T03:49:39,372 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:39,376 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=6737d9a3b3f50844292ae9b1e1e8e03d, threads=4 2024-12-06T03:49:39,378 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/2fb1865141aa4862a15a1d091b9ab214 for region: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,378 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/de2e2723042b4290877cb982e6947d98 for region: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,378 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/f349df77c76c4697be056fa5e9133583 for region: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,379 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38 for region: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,389 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/f349df77c76c4697be056fa5e9133583, top=true 2024-12-06T03:49:39,389 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/2fb1865141aa4862a15a1d091b9ab214, top=true 2024-12-06T03:49:39,390 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/de2e2723042b4290877cb982e6947d98, top=true 2024-12-06T03:49:39,397 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-2fb1865141aa4862a15a1d091b9ab214 for child: 9137d99742c4c20a6bcf5013532694f1, parent: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,397 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-f349df77c76c4697be056fa5e9133583 for child: 9137d99742c4c20a6bcf5013532694f1, parent: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,397 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/f349df77c76c4697be056fa5e9133583 for region: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,397 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/2fb1865141aa4862a15a1d091b9ab214 for region: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,397 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-de2e2723042b4290877cb982e6947d98 for child: 9137d99742c4c20a6bcf5013532694f1, parent: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,398 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/de2e2723042b4290877cb982e6947d98 for region: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741847_1023 (size=27) 2024-12-06T03:49:39,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741847_1023 (size=27) 2024-12-06T03:49:39,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741848_1024 (size=27) 2024-12-06T03:49:39,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741848_1024 (size=27) 2024-12-06T03:49:39,408 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38 for region: 6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:49:39,410 DEBUG [PEWorker-4 
{}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 6737d9a3b3f50844292ae9b1e1e8e03d Daughter A: [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d] storefiles, Daughter B: [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-2fb1865141aa4862a15a1d091b9ab214, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-de2e2723042b4290877cb982e6947d98, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-f349df77c76c4697be056fa5e9133583] storefiles. 2024-12-06T03:49:39,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741849_1025 (size=71) 2024-12-06T03:49:39,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741849_1025 (size=71) 2024-12-06T03:49:39,420 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:39,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741850_1026 (size=71) 2024-12-06T03:49:39,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741850_1026 (size=71) 2024-12-06T03:49:39,432 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:39,442 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-06T03:49:39,444 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-06T03:49:39,446 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733456979446"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733456979446"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733456979446"}]},"ts":"1733456979446"} 2024-12-06T03:49:39,447 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733456979446"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733456979446"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733456979446"}]},"ts":"1733456979446"} 2024-12-06T03:49:39,447 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733456979446"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733456979446"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733456979446"}]},"ts":"1733456979446"} 2024-12-06T03:49:39,460 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=64e8b1893dc9f355346e3f21b27481c3, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9137d99742c4c20a6bcf5013532694f1, ASSIGN}] 2024-12-06T03:49:39,461 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9137d99742c4c20a6bcf5013532694f1, ASSIGN 2024-12-06T03:49:39,461 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=64e8b1893dc9f355346e3f21b27481c3, ASSIGN 2024-12-06T03:49:39,462 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=64e8b1893dc9f355346e3f21b27481c3, ASSIGN; state=SPLITTING_NEW, location=6f1b912b0816,33823,1733456953577; forceNewPlan=false, retain=false 2024-12-06T03:49:39,462 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9137d99742c4c20a6bcf5013532694f1, ASSIGN; state=SPLITTING_NEW, location=6f1b912b0816,33823,1733456953577; forceNewPlan=false, retain=false 2024-12-06T03:49:39,614 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=64e8b1893dc9f355346e3f21b27481c3, regionState=OPENING, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:39,614 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=9137d99742c4c20a6bcf5013532694f1, regionState=OPENING, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:39,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=64e8b1893dc9f355346e3f21b27481c3, ASSIGN because future has completed 2024-12-06T03:49:39,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 64e8b1893dc9f355346e3f21b27481c3, server=6f1b912b0816,33823,1733456953577}] 2024-12-06T03:49:39,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9137d99742c4c20a6bcf5013532694f1, ASSIGN because future has completed 2024-12-06T03:49:39,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9137d99742c4c20a6bcf5013532694f1, server=6f1b912b0816,33823,1733456953577}] 2024-12-06T03:49:39,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:39,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:39,781 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 
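[editorial note] The recurring WARN stack traces above show RecoverLeaseFSUtils calling isFileClosed reflectively; the real failure ("java.io.IOException: Filesystem closed") therefore surfaces as the cause of a java.lang.reflect.InvocationTargetException. The sketch below demonstrates that standard JDK behaviour with a stand-in class; the FakeClient type and its method are hypothetical and only mimic the shape of the call seen in the trace.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Method.invoke wraps any exception thrown by the reflectively called method in
// InvocationTargetException; getCause() recovers the original, as in the WARN above.
public class ReflectionWrapSketch {

    public static class FakeClient {
        public boolean isFileClosed(String path) throws java.io.IOException {
            throw new java.io.IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = FakeClient.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new FakeClient(), "/some/wal");
        } catch (InvocationTargetException e) {
            System.out.println("wrapped: " + e);           // java.lang.reflect.InvocationTargetException
            System.out.println("cause:   " + e.getCause()); // java.io.IOException: Filesystem closed
        }
    }
}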
2024-12-06T03:49:39,781 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 9137d99742c4c20a6bcf5013532694f1, NAME => 'TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-06T03:49:39,782 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,782 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:49:39,782 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,782 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,784 INFO [StoreOpener-9137d99742c4c20a6bcf5013532694f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,785 INFO [StoreOpener-9137d99742c4c20a6bcf5013532694f1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9137d99742c4c20a6bcf5013532694f1 columnFamilyName info 2024-12-06T03:49:39,785 DEBUG [StoreOpener-9137d99742c4c20a6bcf5013532694f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:39,799 DEBUG [StoreOpener-9137d99742c4c20a6bcf5013532694f1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d->hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38-top 2024-12-06T03:49:39,804 DEBUG [StoreOpener-9137d99742c4c20a6bcf5013532694f1-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-2fb1865141aa4862a15a1d091b9ab214 2024-12-06T03:49:39,809 DEBUG [StoreOpener-9137d99742c4c20a6bcf5013532694f1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-de2e2723042b4290877cb982e6947d98 2024-12-06T03:49:39,814 DEBUG [StoreOpener-9137d99742c4c20a6bcf5013532694f1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-f349df77c76c4697be056fa5e9133583 2024-12-06T03:49:39,814 INFO [StoreOpener-9137d99742c4c20a6bcf5013532694f1-1 {}] regionserver.HStore(327): Store=9137d99742c4c20a6bcf5013532694f1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:49:39,814 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,815 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,816 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,817 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,817 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,818 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,819 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 9137d99742c4c20a6bcf5013532694f1; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728160, jitterRate=-0.07409711182117462}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T03:49:39,819 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:39,820 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1006): Region open journal for 9137d99742c4c20a6bcf5013532694f1: Running coprocessor pre-open hook at 1733456979782Writing region info on filesystem at 1733456979782Initializing all the Stores at 1733456979783 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456979783Cleaning up temporary data from old regions at 1733456979817 (+34 ms)Running coprocessor post-open hooks at 1733456979819 (+2 ms)Region opened successfully at 1733456979820 (+1 ms) 2024-12-06T03:49:39,821 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., pid=13, masterSystemTime=1733456979772 2024-12-06T03:49:39,821 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 9137d99742c4c20a6bcf5013532694f1:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:49:39,821 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:39,821 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-06T03:49:39,822 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:49:39,822 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 9137d99742c4c20a6bcf5013532694f1/info is initiating minor compaction (all files) 2024-12-06T03:49:39,822 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9137d99742c4c20a6bcf5013532694f1/info in TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:49:39,823 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 
2024-12-06T03:49:39,823 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d->hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38-top, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-f349df77c76c4697be056fa5e9133583, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-de2e2723042b4290877cb982e6947d98, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-2fb1865141aa4862a15a1d091b9ab214] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp, totalSize=120.8 K 2024-12-06T03:49:39,823 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:49:39,823 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 
2024-12-06T03:49:39,823 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 64e8b1893dc9f355346e3f21b27481c3, NAME => 'TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-06T03:49:39,823 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,823 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:49:39,823 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733456964836 2024-12-06T03:49:39,823 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,824 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,824 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=9137d99742c4c20a6bcf5013532694f1, regionState=OPEN, openSeqNum=127, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:39,824 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-f349df77c76c4697be056fa5e9133583, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1733456979035 2024-12-06T03:49:39,824 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-de2e2723042b4290877cb982e6947d98, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733456979062 2024-12-06T03:49:39,825 INFO [StoreOpener-64e8b1893dc9f355346e3f21b27481c3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,825 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-2fb1865141aa4862a15a1d091b9ab214, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733456979098 2024-12-06T03:49:39,825 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-06T03:49:39,825 INFO [StoreOpener-64e8b1893dc9f355346e3f21b27481c3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 64e8b1893dc9f355346e3f21b27481c3 columnFamilyName info 2024-12-06T03:49:39,825 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-06T03:49:39,825 DEBUG [StoreOpener-64e8b1893dc9f355346e3f21b27481c3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:49:39,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-06T03:49:39,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9137d99742c4c20a6bcf5013532694f1, server=6f1b912b0816,33823,1733456953577 because future has completed 2024-12-06T03:49:39,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-12-06T03:49:39,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 9137d99742c4c20a6bcf5013532694f1, server=6f1b912b0816,33823,1733456953577 in 206 msec 2024-12-06T03:49:39,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9137d99742c4c20a6bcf5013532694f1, ASSIGN in 370 msec 2024-12-06T03:49:39,836 DEBUG [StoreOpener-64e8b1893dc9f355346e3f21b27481c3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d->hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38-bottom 2024-12-06T03:49:39,836 INFO [StoreOpener-64e8b1893dc9f355346e3f21b27481c3-1 {}] regionserver.HStore(327): Store=64e8b1893dc9f355346e3f21b27481c3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:49:39,836 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,837 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3 
2024-12-06T03:49:39,838 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,839 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,839 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,840 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,841 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 64e8b1893dc9f355346e3f21b27481c3; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773306, jitterRate=-0.016691535711288452}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T03:49:39,841 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:49:39,842 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 64e8b1893dc9f355346e3f21b27481c3: Running coprocessor pre-open hook at 1733456979824Writing region info on filesystem at 1733456979824Initializing all the Stores at 1733456979824Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733456979824Cleaning up temporary data from old regions at 1733456979839 (+15 ms)Running coprocessor post-open hooks at 1733456979841 (+2 ms)Region opened successfully at 1733456979842 (+1 ms) 2024-12-06T03:49:39,842 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3., pid=12, masterSystemTime=1733456979772 2024-12-06T03:49:39,843 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 64e8b1893dc9f355346e3f21b27481c3:info, priority=-2147483648, current under compaction store size is 2 2024-12-06T03:49:39,843 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:39,843 DEBUG [RS:0;6f1b912b0816:33823-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-06T03:49:39,843 INFO 
[RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 2024-12-06T03:49:39,843 DEBUG [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.HStore(1541): 64e8b1893dc9f355346e3f21b27481c3/info is initiating minor compaction (all files) 2024-12-06T03:49:39,843 INFO [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 64e8b1893dc9f355346e3f21b27481c3/info in TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 2024-12-06T03:49:39,844 INFO [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d->hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38-bottom] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/.tmp, totalSize=73.6 K 2024-12-06T03:49:39,844 DEBUG [RS:0;6f1b912b0816:33823-longCompactions-0 {}] compactions.Compactor(225): Compacting 9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733456964836 2024-12-06T03:49:39,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/info/c70150c10e924c7790afbe0e87b7480a is 193, key is TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1./info:regioninfo/1733456979823/Put/seqid=0 2024-12-06T03:49:39,845 DEBUG [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 2024-12-06T03:49:39,846 INFO [RS_OPEN_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 
2024-12-06T03:49:39,847 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=64e8b1893dc9f355346e3f21b27481c3, regionState=OPEN, openSeqNum=127, regionLocation=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:39,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 64e8b1893dc9f355346e3f21b27481c3, server=6f1b912b0816,33823,1733456953577 because future has completed 2024-12-06T03:49:39,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-06T03:49:39,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 64e8b1893dc9f355346e3f21b27481c3, server=6f1b912b0816,33823,1733456953577 in 236 msec 2024-12-06T03:49:39,859 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9137d99742c4c20a6bcf5013532694f1#info#compaction#65 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:39,859 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/18c98f4d63e54c1cb26eb961ef1cf187 is 1080, key is row0062/info:/1733456977026/Put/seqid=0 2024-12-06T03:49:39,861 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-06T03:49:39,861 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=64e8b1893dc9f355346e3f21b27481c3, ASSIGN in 398 msec 2024-12-06T03:49:39,862 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=6737d9a3b3f50844292ae9b1e1e8e03d, daughterA=64e8b1893dc9f355346e3f21b27481c3, daughterB=9137d99742c4c20a6bcf5013532694f1 in 736 msec 2024-12-06T03:49:39,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741851_1027 (size=9882) 2024-12-06T03:49:39,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741851_1027 (size=9882) 2024-12-06T03:49:39,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/info/c70150c10e924c7790afbe0e87b7480a 2024-12-06T03:49:39,869 INFO [RS:0;6f1b912b0816:33823-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64e8b1893dc9f355346e3f21b27481c3#info#compaction#66 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:39,870 DEBUG [RS:0;6f1b912b0816:33823-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/.tmp/info/13f6b6679721495390d75662aabf8dde is 1080, key is row0001/info:/1733456964836/Put/seqid=0 2024-12-06T03:49:39,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741852_1028 (size=43081) 2024-12-06T03:49:39,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741852_1028 (size=43081) 2024-12-06T03:49:39,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741853_1029 (size=70862) 2024-12-06T03:49:39,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741853_1029 (size=70862) 2024-12-06T03:49:39,881 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/18c98f4d63e54c1cb26eb961ef1cf187 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/18c98f4d63e54c1cb26eb961ef1cf187 2024-12-06T03:49:39,885 DEBUG [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/.tmp/info/13f6b6679721495390d75662aabf8dde as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/info/13f6b6679721495390d75662aabf8dde 2024-12-06T03:49:39,888 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 9137d99742c4c20a6bcf5013532694f1/info of 9137d99742c4c20a6bcf5013532694f1 into 18c98f4d63e54c1cb26eb961ef1cf187(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T03:49:39,888 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:39,888 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., storeName=9137d99742c4c20a6bcf5013532694f1/info, priority=12, startTime=1733456979821; duration=0sec 2024-12-06T03:49:39,888 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:39,888 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9137d99742c4c20a6bcf5013532694f1:info 2024-12-06T03:49:39,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/ns/5108fe190df94ff080e351e5bb3b38f3 is 43, key is default/ns:d/1733456954779/Put/seqid=0 2024-12-06T03:49:39,892 INFO [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 64e8b1893dc9f355346e3f21b27481c3/info of 64e8b1893dc9f355346e3f21b27481c3 into 13f6b6679721495390d75662aabf8dde(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T03:49:39,892 DEBUG [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 64e8b1893dc9f355346e3f21b27481c3: 2024-12-06T03:49:39,892 INFO [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3., storeName=64e8b1893dc9f355346e3f21b27481c3/info, priority=15, startTime=1733456979843; duration=0sec 2024-12-06T03:49:39,892 DEBUG [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:39,892 DEBUG [RS:0;6f1b912b0816:33823-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64e8b1893dc9f355346e3f21b27481c3:info 2024-12-06T03:49:39,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741854_1030 (size=5153) 2024-12-06T03:49:39,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741854_1030 (size=5153) 2024-12-06T03:49:39,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/ns/5108fe190df94ff080e351e5bb3b38f3 2024-12-06T03:49:39,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/table/5ff3838ecf534d0e89ea5af026793e0a is 65, key is TestLogRolling-testLogRolling/table:state/1733456955163/Put/seqid=0 2024-12-06T03:49:39,916 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741855_1031 (size=5340) 2024-12-06T03:49:39,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741855_1031 (size=5340) 2024-12-06T03:49:39,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/table/5ff3838ecf534d0e89ea5af026793e0a 2024-12-06T03:49:39,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/info/c70150c10e924c7790afbe0e87b7480a as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/info/c70150c10e924c7790afbe0e87b7480a 2024-12-06T03:49:39,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/info/c70150c10e924c7790afbe0e87b7480a, entries=30, sequenceid=17, filesize=9.7 K 2024-12-06T03:49:39,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/ns/5108fe190df94ff080e351e5bb3b38f3 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/ns/5108fe190df94ff080e351e5bb3b38f3 2024-12-06T03:49:39,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/ns/5108fe190df94ff080e351e5bb3b38f3, entries=2, sequenceid=17, filesize=5.0 K 2024-12-06T03:49:39,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/table/5ff3838ecf534d0e89ea5af026793e0a as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/table/5ff3838ecf534d0e89ea5af026793e0a 2024-12-06T03:49:39,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/table/5ff3838ecf534d0e89ea5af026793e0a, entries=2, sequenceid=17, filesize=5.2 K 2024-12-06T03:49:39,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 117ms, sequenceid=17, compaction requested=false 2024-12-06T03:49:39,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-06T03:49:40,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:40,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:41,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47108 deadline: 1733456991106, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. is not online on 6f1b912b0816,33823,1733456953577 2024-12-06T03:49:41,110 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. is not online on 6f1b912b0816,33823,1733456953577 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-06T03:49:41,110 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d. 
is not online on 6f1b912b0816,33823,1733456953577 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-06T03:49:41,110 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733456954808.6737d9a3b3f50844292ae9b1e1e8e03d., hostname=6f1b912b0816,33823,1733456953577, seqNum=2 from cache 2024-12-06T03:49:41,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:49:41,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:42,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:42,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:49:43,419 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T03:49:43,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:43,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-06T03:49:44,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
 Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
(the same FsDatasetImpl(779) warning was logged repeatedly on the HBase-Metrics2-1 thread through 03:49:44,381)
2024-12-06T03:49:44,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-06T03:49:44,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed (stack trace identical to the 03:49:44,652 entry above)
2024-12-06T03:49:44,887 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-06T03:49:44,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
 Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
(the same FsDatasetImpl(779) warning was logged repeatedly on the HBase-Metrics2-1 thread through 03:49:44,918)
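The "Failed invocation" warnings above come from RecoverLeaseFSUtils probing whether the WAL file is already closed: DistributedFileSystem.isFileClosed(Path) is resolved and called via reflection (hence the GeneratedMethodAccessor/Method.invoke frames), so the real error surfaces as the cause of an InvocationTargetException. Here the cause is the DFSClient's "Filesystem closed" IOException, i.e. the client behind the FileSystem handle had already been shut down when the probe ran. Below is a minimal sketch of such a reflective probe; it is illustrative only, not the HBase implementation, and the class name is made up for the example.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative reflective isFileClosed(Path) probe. Only DistributedFileSystem exposes this
// method, so it is looked up at runtime; any IOException thrown inside the call (such as
// "Filesystem closed") arrives wrapped in an InvocationTargetException, which is what the
// WARN entries above are reporting.
final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // filesystem has no isFileClosed(); caller must fall back to plain polling
    } catch (InvocationTargetException e) {
      // e.getCause() is the real failure, e.g. java.io.IOException: Filesystem closed
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}
```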
2024-12-06T03:49:45,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
2024-12-06T03:49:45,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
2024-12-06T03:49:46,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
2024-12-06T03:49:46,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
2024-12-06T03:49:47,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
2024-12-06T03:49:47,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
2024-12-06T03:49:48,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
2024-12-06T03:49:48,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
2024-12-06T03:49:49,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
2024-12-06T03:49:49,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
2024-12-06T03:49:50,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
2024-12-06T03:49:50,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
(each of these warnings carried the same InvocationTargetException / java.io.IOException: Filesystem closed stack trace as the 03:49:44,652 entry above)
2024-12-06T03:49:51,144 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., hostname=6f1b912b0816,33823,1733456953577, seqNum=127]
2024-12-06T03:49:51,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1
2024-12-06T03:49:51,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-06T03:49:51,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/c9cd7ecd2a564b599bfc8777ae875027 is 1080, key is row0097/info:/1733456991145/Put/seqid=0
2024-12-06T03:49:51,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741856_1032 (size=12516)
2024-12-06T03:49:51,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741856_1032 (size=12516)
2024-12-06T03:49:51,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/c9cd7ecd2a564b599bfc8777ae875027
2024-12-06T03:49:51,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/c9cd7ecd2a564b599bfc8777ae875027 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c9cd7ecd2a564b599bfc8777ae875027
2024-12-06T03:49:51,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c9cd7ecd2a564b599bfc8777ae875027, entries=7, sequenceid=137, filesize=12.2 K
2024-12-06T03:49:51,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 9137d99742c4c20a6bcf5013532694f1 in 24ms, sequenceid=137, compaction requested=false
2024-12-06T03:49:51,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1:
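In the flush above, the new HFile (c9cd7ecd2a564b599bfc8777ae875027) is first written under the region's .tmp directory and then "committed" by moving it into the info column-family directory, which is what the HRegionFileSystem "Committing ... as ..." line records. A rough sketch of that commit step, using the standard Hadoop FileSystem.rename call; the class and method names below are made up for the sketch and this is not the HBase code path itself.

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative commit step for a flushed HFile: write under .tmp first, then rename into
// the column-family directory. On HDFS a same-filesystem rename is a metadata-only
// operation, which is consistent with the commit lines above completing within a few ms.
final class CommitFlushedFileSketch {
  static Path commit(FileSystem fs, Path tmpHFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpHFile.getName());
    if (!fs.rename(tmpHFile, dst)) {
      throw new IOException("Failed to commit " + tmpHFile + " as " + dst);
    }
    return dst;
  }
}
```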
2024-12-06T03:49:51,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1
2024-12-06T03:49:51,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-12-06T03:49:51,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/e6ced80066ef44adb6979a93b1ab8d14 is 1080, key is row0104/info:/1733456991157/Put/seqid=0
2024-12-06T03:49:51,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741857_1033 (size=17906)
2024-12-06T03:49:51,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741857_1033 (size=17906)
2024-12-06T03:49:51,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/e6ced80066ef44adb6979a93b1ab8d14
2024-12-06T03:49:51,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/e6ced80066ef44adb6979a93b1ab8d14 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e6ced80066ef44adb6979a93b1ab8d14
2024-12-06T03:49:51,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e6ced80066ef44adb6979a93b1ab8d14, entries=12, sequenceid=152, filesize=17.5 K
2024-12-06T03:49:51,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 9137d99742c4c20a6bcf5013532694f1 in 23ms, sequenceid=152, compaction requested=true
2024-12-06T03:49:51,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1:
2024-12-06T03:49:51,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9137d99742c4c20a6bcf5013532694f1:info, priority=-2147483648, current under compaction store size is 1
2024-12-06T03:49:51,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T03:49:51,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1
2024-12-06T03:49:51,205 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-06T03:49:51,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-12-06T03:49:51,206 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73503 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-06T03:49:51,206 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 9137d99742c4c20a6bcf5013532694f1/info is initiating minor compaction (all files)
2024-12-06T03:49:51,206 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9137d99742c4c20a6bcf5013532694f1/info in TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.
2024-12-06T03:49:51,206 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/18c98f4d63e54c1cb26eb961ef1cf187, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c9cd7ecd2a564b599bfc8777ae875027, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e6ced80066ef44adb6979a93b1ab8d14] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp, totalSize=71.8 K
2024-12-06T03:49:51,207 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 18c98f4d63e54c1cb26eb961ef1cf187, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733456977026
2024-12-06T03:49:51,207 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting c9cd7ecd2a564b599bfc8777ae875027, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733456991145
2024-12-06T03:49:51,207 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting e6ced80066ef44adb6979a93b1ab8d14, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733456991157
2024-12-06T03:49:51,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/5cdb1eef59f14b3ba91b4aa7a4fd28de is 1080, key is row0116/info:/1733456991182/Put/seqid=0
2024-12-06T03:49:51,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741858_1034 (size=17906)
2024-12-06T03:49:51,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741858_1034 (size=17906)
2024-12-06T03:49:51,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/5cdb1eef59f14b3ba91b4aa7a4fd28de
2024-12-06T03:49:51,219 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9137d99742c4c20a6bcf5013532694f1#info#compaction#72 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-06T03:49:51,220 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/c6b0cb981b2e45b7bcd2693dc9cde2d4 is 1080, key is row0062/info:/1733456977026/Put/seqid=0
2024-12-06T03:49:51,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/5cdb1eef59f14b3ba91b4aa7a4fd28de as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5cdb1eef59f14b3ba91b4aa7a4fd28de
2024-12-06T03:49:51,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741859_1035 (size=63733)
2024-12-06T03:49:51,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741859_1035 (size=63733)
2024-12-06T03:49:51,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5cdb1eef59f14b3ba91b4aa7a4fd28de, entries=12, sequenceid=167, filesize=17.5 K
2024-12-06T03:49:51,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 9137d99742c4c20a6bcf5013532694f1 in 24ms, sequenceid=167, compaction requested=false
2024-12-06T03:49:51,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1:
2024-12-06T03:49:51,230 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/c6b0cb981b2e45b7bcd2693dc9cde2d4 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c6b0cb981b2e45b7bcd2693dc9cde2d4
2024-12-06T03:49:51,235 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9137d99742c4c20a6bcf5013532694f1/info of 9137d99742c4c20a6bcf5013532694f1 into c6b0cb981b2e45b7bcd2693dc9cde2d4(size=62.2 K), total size for store is 79.7 K. This selection was in queue for 0sec, and took 0sec to execute.
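The sizes in the compaction above are internally consistent: the three inputs (42.1 K + 12.2 K + 17.5 K) add up to the logged totalSize of 71.8 K (73,503 bytes), the compacted output c6b0cb981b2e45b7bcd2693dc9cde2d4 is the 63,733-byte block reported by addStoredBlock (about 62.2 K), and the "total size for store is 79.7 K" is that output plus the 17.5 K file (5cdb1eef59f14b3ba91b4aa7a4fd28de) flushed while the compaction ran. A small check of that arithmetic; the 43,081-byte figure for the oldest file is inferred from the logged byte total minus the two known block sizes, not read from the log directly.

```java
// Quick arithmetic check of the compaction numbers logged above.
final class CompactionMath {
  public static void main(String[] args) {
    double inputTotalKb = 42.1 + 12.2 + 17.5;               // = 71.8 K, the logged totalSize
    long inferredOldestBytes = 73_503 - 12_516 - 17_906;    // = 43,081 bytes (~42.1 K), inferred
    double compactedKb = 63_733 / 1024.0;                   // blk_1073741859 -> ~62.2 K output file
    double concurrentFlushKb = 17_906 / 1024.0;             // 5cdb1eef... flushed during compaction -> ~17.5 K
    double storeTotalKb = compactedKb + concurrentFlushKb;  // ~79.7 K, the logged "total size for store"
    System.out.printf("inputs=%.1fK oldest=%d bytes compacted=%.1fK store=%.1fK%n",
        inputTotalKb, inferredOldestBytes, compactedKb, storeTotalKb);
  }
}
```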
2024-12-06T03:49:51,235 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9137d99742c4c20a6bcf5013532694f1:
2024-12-06T03:49:51,235 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., storeName=9137d99742c4c20a6bcf5013532694f1/info, priority=13, startTime=1733456991205; duration=0sec
2024-12-06T03:49:51,235 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-06T03:49:51,235 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9137d99742c4c20a6bcf5013532694f1:info
2024-12-06T03:49:51,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
2024-12-06T03:49:51,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
2024-12-06T03:49:52,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388
2024-12-06T03:49:52,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta
(each of these warnings carried the same InvocationTargetException / java.io.IOException: Filesystem closed stack trace as the 03:49:44,652 entry above)
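The lease-recovery warnings recur roughly once per second per WAL file (03:49:44,652, 03:49:45,653, 03:49:46,655, ... for the same path), which is the shape of a recover-and-poll loop: ask the NameNode to recover the lease once, then keep asking isFileClosed until it reports true or a deadline passes. Every probe in this run fails immediately because the DFSClient behind the filesystem handle has already been closed. The loop below is an illustrative sketch of that pattern against the public DistributedFileSystem API, not the HBase implementation; the class, method, and timeout parameter are assumptions made for the example.

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative recover-and-poll loop matching the ~1 s cadence of the warnings above:
// recoverLease() is requested once, then isFileClosed() is polled until the NameNode
// confirms the file is closed or the deadline passes.
final class LeaseRecoverySketch {
  static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    if (dfs.recoverLease(wal)) {
      return true; // lease recovered immediately
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L); // ~1 s between probes, like the timestamps in the log
      if (dfs.isFileClosed(wal)) {
        return true;
      }
    }
    return false;
  }
}
```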
11 more 2024-12-06T03:49:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:53,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:49:53,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/f1f46ad690534d7ca2d7218dca11aaed is 1080, key is row0128/info:/1733456991206/Put/seqid=0 2024-12-06T03:49:53,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741860_1036 (size=12516) 2024-12-06T03:49:53,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741860_1036 (size=12516) 2024-12-06T03:49:53,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/f1f46ad690534d7ca2d7218dca11aaed 2024-12-06T03:49:53,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/f1f46ad690534d7ca2d7218dca11aaed as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/f1f46ad690534d7ca2d7218dca11aaed 2024-12-06T03:49:53,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/f1f46ad690534d7ca2d7218dca11aaed, entries=7, sequenceid=178, filesize=12.2 K 2024-12-06T03:49:53,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 9137d99742c4c20a6bcf5013532694f1 in 25ms, sequenceid=178, compaction requested=true 2024-12-06T03:49:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9137d99742c4c20a6bcf5013532694f1:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:49:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:53,270 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:49:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:53,270 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-06T03:49:53,271 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94155 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:49:53,271 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 9137d99742c4c20a6bcf5013532694f1/info is initiating minor compaction (all files) 2024-12-06T03:49:53,272 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9137d99742c4c20a6bcf5013532694f1/info in TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:49:53,272 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c6b0cb981b2e45b7bcd2693dc9cde2d4, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5cdb1eef59f14b3ba91b4aa7a4fd28de, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/f1f46ad690534d7ca2d7218dca11aaed] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp, totalSize=91.9 K 2024-12-06T03:49:53,272 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6b0cb981b2e45b7bcd2693dc9cde2d4, keycount=54, bloomtype=ROW, size=62.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733456977026 2024-12-06T03:49:53,272 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5cdb1eef59f14b3ba91b4aa7a4fd28de, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733456991182 2024-12-06T03:49:53,273 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting f1f46ad690534d7ca2d7218dca11aaed, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733456991206 2024-12-06T03:49:53,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/975276476c574038b04a57a9afdc2fdc is 1080, key is row0135/info:/1733456993246/Put/seqid=0 2024-12-06T03:49:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741861_1037 (size=17906) 2024-12-06T03:49:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741861_1037 (size=17906) 2024-12-06T03:49:53,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=193 (bloomFilter=true), 
to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/975276476c574038b04a57a9afdc2fdc 2024-12-06T03:49:53,291 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9137d99742c4c20a6bcf5013532694f1#info#compaction#75 average throughput is 37.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:53,292 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/5b14a02f62c34bb0bdc938c3718b17bc is 1080, key is row0062/info:/1733456977026/Put/seqid=0 2024-12-06T03:49:53,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/975276476c574038b04a57a9afdc2fdc as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/975276476c574038b04a57a9afdc2fdc 2024-12-06T03:49:53,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/975276476c574038b04a57a9afdc2fdc, entries=12, sequenceid=193, filesize=17.5 K 2024-12-06T03:49:53,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for 9137d99742c4c20a6bcf5013532694f1 in 38ms, sequenceid=193, compaction requested=false 2024-12-06T03:49:53,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:53,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:53,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-06T03:49:53,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741862_1038 (size=84390) 2024-12-06T03:49:53,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741862_1038 (size=84390) 2024-12-06T03:49:53,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/70fd98ac6e614c2b945b9a0ff09089ab is 1080, key is row0147/info:/1733456993272/Put/seqid=0 2024-12-06T03:49:53,327 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/5b14a02f62c34bb0bdc938c3718b17bc as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5b14a02f62c34bb0bdc938c3718b17bc 2024-12-06T03:49:53,333 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9137d99742c4c20a6bcf5013532694f1/info of 9137d99742c4c20a6bcf5013532694f1 into 5b14a02f62c34bb0bdc938c3718b17bc(size=82.4 K), total size for store is 99.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T03:49:53,333 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:53,333 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., storeName=9137d99742c4c20a6bcf5013532694f1/info, priority=13, startTime=1733456993270; duration=0sec 2024-12-06T03:49:53,334 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:53,334 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9137d99742c4c20a6bcf5013532694f1:info 2024-12-06T03:49:53,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741863_1039 (size=20078) 2024-12-06T03:49:53,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741863_1039 (size=20078) 2024-12-06T03:49:53,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/70fd98ac6e614c2b945b9a0ff09089ab 2024-12-06T03:49:53,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/70fd98ac6e614c2b945b9a0ff09089ab as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/70fd98ac6e614c2b945b9a0ff09089ab 2024-12-06T03:49:53,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/70fd98ac6e614c2b945b9a0ff09089ab, entries=14, sequenceid=210, filesize=19.6 K 2024-12-06T03:49:53,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 9137d99742c4c20a6bcf5013532694f1 in 43ms, sequenceid=210, compaction requested=true 2024-12-06T03:49:53,354 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:53,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9137d99742c4c20a6bcf5013532694f1:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:49:53,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:53,354 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:49:53,355 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 122374 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:49:53,355 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 9137d99742c4c20a6bcf5013532694f1/info is initiating minor compaction (all files) 2024-12-06T03:49:53,355 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9137d99742c4c20a6bcf5013532694f1/info in TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:49:53,355 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5b14a02f62c34bb0bdc938c3718b17bc, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/975276476c574038b04a57a9afdc2fdc, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/70fd98ac6e614c2b945b9a0ff09089ab] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp, totalSize=119.5 K 2024-12-06T03:49:53,355 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5b14a02f62c34bb0bdc938c3718b17bc, keycount=73, bloomtype=ROW, size=82.4 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733456977026 2024-12-06T03:49:53,355 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 975276476c574038b04a57a9afdc2fdc, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733456993246 2024-12-06T03:49:53,356 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70fd98ac6e614c2b945b9a0ff09089ab, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733456993272 2024-12-06T03:49:53,369 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9137d99742c4c20a6bcf5013532694f1#info#compaction#77 average throughput is 50.79 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:53,369 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/ab46e8e3073444a38e143183fbb9e562 is 1080, key is row0062/info:/1733456977026/Put/seqid=0 2024-12-06T03:49:53,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741864_1040 (size=112528) 2024-12-06T03:49:53,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741864_1040 (size=112528) 2024-12-06T03:49:53,386 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/ab46e8e3073444a38e143183fbb9e562 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/ab46e8e3073444a38e143183fbb9e562 2024-12-06T03:49:53,394 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9137d99742c4c20a6bcf5013532694f1/info of 9137d99742c4c20a6bcf5013532694f1 into ab46e8e3073444a38e143183fbb9e562(size=109.9 K), total size for store is 109.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T03:49:53,395 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:53,395 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., storeName=9137d99742c4c20a6bcf5013532694f1/info, priority=13, startTime=1733456993354; duration=0sec 2024-12-06T03:49:53,395 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:53,395 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9137d99742c4c20a6bcf5013532694f1:info 2024-12-06T03:49:53,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:53,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:54,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:54,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:54,885 INFO [master/6f1b912b0816:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T03:49:54,885 INFO [master/6f1b912b0816:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-06T03:49:55,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:55,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:49:55,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/7ae0c57238be40198589395e94e219a7 is 1080, key is row0161/info:/1733456995312/Put/seqid=0 2024-12-06T03:49:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741865_1041 (size=12516) 2024-12-06T03:49:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741865_1041 (size=12516) 2024-12-06T03:49:55,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/7ae0c57238be40198589395e94e219a7 2024-12-06T03:49:55,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/7ae0c57238be40198589395e94e219a7 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/7ae0c57238be40198589395e94e219a7 2024-12-06T03:49:55,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/7ae0c57238be40198589395e94e219a7, entries=7, sequenceid=222, filesize=12.2 K 2024-12-06T03:49:55,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 9137d99742c4c20a6bcf5013532694f1 in 27ms, sequenceid=222, compaction requested=false 2024-12-06T03:49:55,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:55,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:55,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-06T03:49:55,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/6266d7a039444cc0a1a53eb75a07c369 is 1080, key is row0168/info:/1733456995328/Put/seqid=0 2024-12-06T03:49:55,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to 
blk_1073741866_1042 (size=20078) 2024-12-06T03:49:55,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741866_1042 (size=20078) 2024-12-06T03:49:55,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/6266d7a039444cc0a1a53eb75a07c369 2024-12-06T03:49:55,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/6266d7a039444cc0a1a53eb75a07c369 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/6266d7a039444cc0a1a53eb75a07c369 2024-12-06T03:49:55,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/6266d7a039444cc0a1a53eb75a07c369, entries=14, sequenceid=239, filesize=19.6 K 2024-12-06T03:49:55,376 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 9137d99742c4c20a6bcf5013532694f1 in 22ms, sequenceid=239, compaction requested=true 2024-12-06T03:49:55,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:55,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9137d99742c4c20a6bcf5013532694f1:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:49:55,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:55,376 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:49:55,377 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 145122 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:49:55,377 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 9137d99742c4c20a6bcf5013532694f1/info is initiating minor compaction (all files) 2024-12-06T03:49:55,377 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9137d99742c4c20a6bcf5013532694f1/info in TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 
2024-12-06T03:49:55,377 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/ab46e8e3073444a38e143183fbb9e562, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/7ae0c57238be40198589395e94e219a7, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/6266d7a039444cc0a1a53eb75a07c369] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp, totalSize=141.7 K 2024-12-06T03:49:55,378 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting ab46e8e3073444a38e143183fbb9e562, keycount=99, bloomtype=ROW, size=109.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733456977026 2024-12-06T03:49:55,378 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7ae0c57238be40198589395e94e219a7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1733456995312 2024-12-06T03:49:55,378 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6266d7a039444cc0a1a53eb75a07c369, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733456995328 2024-12-06T03:49:55,388 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9137d99742c4c20a6bcf5013532694f1#info#compaction#80 average throughput is 61.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:55,389 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/e9e17b817b2341ec94d2e241ebb0e4b9 is 1080, key is row0062/info:/1733456977026/Put/seqid=0 2024-12-06T03:49:55,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741867_1043 (size=135400) 2024-12-06T03:49:55,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741867_1043 (size=135400) 2024-12-06T03:49:55,397 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/e9e17b817b2341ec94d2e241ebb0e4b9 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e9e17b817b2341ec94d2e241ebb0e4b9 2024-12-06T03:49:55,402 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9137d99742c4c20a6bcf5013532694f1/info of 9137d99742c4c20a6bcf5013532694f1 into e9e17b817b2341ec94d2e241ebb0e4b9(size=132.2 K), total size for store is 132.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T03:49:55,402 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:55,402 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., storeName=9137d99742c4c20a6bcf5013532694f1/info, priority=13, startTime=1733456995376; duration=0sec 2024-12-06T03:49:55,402 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:55,402 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9137d99742c4c20a6bcf5013532694f1:info 2024-12-06T03:49:55,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:55,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:56,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:56,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:57,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:57,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-06T03:49:57,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/9c6543d947dd4fa1959806a3e6fd03b1 is 1080, key is row0182/info:/1733456995355/Put/seqid=0 2024-12-06T03:49:57,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741868_1044 (size=17906) 2024-12-06T03:49:57,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741868_1044 (size=17906) 2024-12-06T03:49:57,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/9c6543d947dd4fa1959806a3e6fd03b1 2024-12-06T03:49:57,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/9c6543d947dd4fa1959806a3e6fd03b1 as 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9c6543d947dd4fa1959806a3e6fd03b1 2024-12-06T03:49:57,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9c6543d947dd4fa1959806a3e6fd03b1, entries=12, sequenceid=255, filesize=17.5 K 2024-12-06T03:49:57,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 9137d99742c4c20a6bcf5013532694f1 in 29ms, sequenceid=255, compaction requested=false 2024-12-06T03:49:57,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:57,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:49:57,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-06T03:49:57,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/a8aec31ee0284c898ed564746af03773 is 1080, key is row0194/info:/1733456997383/Put/seqid=0 2024-12-06T03:49:57,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741869_1045 (size=17918) 2024-12-06T03:49:57,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741869_1045 (size=17918) 2024-12-06T03:49:57,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9137d99742c4c20a6bcf5013532694f1, server=6f1b912b0816,33823,1733456953577 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-06T03:49:57,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47108 deadline: 1733457007440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9137d99742c4c20a6bcf5013532694f1, server=6f1b912b0816,33823,1733456953577 2024-12-06T03:49:57,440 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., hostname=6f1b912b0816,33823,1733456953577, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., hostname=6f1b912b0816,33823,1733456953577, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9137d99742c4c20a6bcf5013532694f1, server=6f1b912b0816,33823,1733456953577 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-06T03:49:57,441 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., hostname=6f1b912b0816,33823,1733456953577, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=9137d99742c4c20a6bcf5013532694f1, server=6f1b912b0816,33823,1733456953577 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-06T03:49:57,441 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., hostname=6f1b912b0816,33823,1733456953577, seqNum=127 because the exception is null or not the one we care about 2024-12-06T03:49:57,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:57,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:57,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/a8aec31ee0284c898ed564746af03773 2024-12-06T03:49:57,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/a8aec31ee0284c898ed564746af03773 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/a8aec31ee0284c898ed564746af03773 2024-12-06T03:49:57,842 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/a8aec31ee0284c898ed564746af03773, entries=12, sequenceid=270, filesize=17.5 K 2024-12-06T03:49:57,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 9137d99742c4c20a6bcf5013532694f1 in 432ms, sequenceid=270, compaction requested=true 2024-12-06T03:49:57,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:57,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9137d99742c4c20a6bcf5013532694f1:info, 
priority=-2147483648, current under compaction store size is 1 2024-12-06T03:49:57,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:57,843 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:49:57,844 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 171224 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:49:57,844 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 9137d99742c4c20a6bcf5013532694f1/info is initiating minor compaction (all files) 2024-12-06T03:49:57,844 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9137d99742c4c20a6bcf5013532694f1/info in TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:49:57,844 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e9e17b817b2341ec94d2e241ebb0e4b9, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9c6543d947dd4fa1959806a3e6fd03b1, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/a8aec31ee0284c898ed564746af03773] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp, totalSize=167.2 K 2024-12-06T03:49:57,845 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting e9e17b817b2341ec94d2e241ebb0e4b9, keycount=120, bloomtype=ROW, size=132.2 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733456977026 2024-12-06T03:49:57,845 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9c6543d947dd4fa1959806a3e6fd03b1, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733456995355 2024-12-06T03:49:57,845 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting a8aec31ee0284c898ed564746af03773, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733456997383 2024-12-06T03:49:57,855 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9137d99742c4c20a6bcf5013532694f1#info#compaction#83 average throughput is 73.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:49:57,856 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/60e7ccbffd94426498af630cdd31276c is 1080, key is row0062/info:/1733456977026/Put/seqid=0 2024-12-06T03:49:57,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741870_1046 (size=161463) 2024-12-06T03:49:57,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741870_1046 (size=161463) 2024-12-06T03:49:57,863 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/60e7ccbffd94426498af630cdd31276c as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/60e7ccbffd94426498af630cdd31276c 2024-12-06T03:49:57,869 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9137d99742c4c20a6bcf5013532694f1/info of 9137d99742c4c20a6bcf5013532694f1 into 60e7ccbffd94426498af630cdd31276c(size=157.7 K), total size for store is 157.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T03:49:57,869 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:49:57,869 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., storeName=9137d99742c4c20a6bcf5013532694f1/info, priority=13, startTime=1733456997843; duration=0sec 2024-12-06T03:49:57,869 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:49:57,869 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9137d99742c4c20a6bcf5013532694f1:info 2024-12-06T03:49:58,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:58,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:59,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:49:59,712 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-12-06T03:49:59,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-12-06T03:50:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:50:07,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-06T03:50:07,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/fa744eb93cbe4262a95ffc46fbc560f6 is 1080, key is row0206/info:/1733456997413/Put/seqid=0 2024-12-06T03:50:07,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741871_1047 (size=24412) 2024-12-06T03:50:07,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741871_1047 (size=24412) 2024-12-06T03:50:07,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/fa744eb93cbe4262a95ffc46fbc560f6 2024-12-06T03:50:07,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/fa744eb93cbe4262a95ffc46fbc560f6 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/fa744eb93cbe4262a95ffc46fbc560f6 2024-12-06T03:50:07,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/fa744eb93cbe4262a95ffc46fbc560f6, entries=18, sequenceid=292, filesize=23.8 K 2024-12-06T03:50:07,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for 9137d99742c4c20a6bcf5013532694f1 in 24ms, sequenceid=292, compaction requested=false 2024-12-06T03:50:07,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:50:07,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:07,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:08,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:08,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:50:09,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T03:50:09,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/c74e724f01614d8d91ccc261bbcf2f8d is 1080, key is row0224/info:/1733457007513/Put/seqid=0 2024-12-06T03:50:09,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741872_1048 (size=12523) 2024-12-06T03:50:09,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741872_1048 (size=12523) 2024-12-06T03:50:09,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/c74e724f01614d8d91ccc261bbcf2f8d 2024-12-06T03:50:09,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/c74e724f01614d8d91ccc261bbcf2f8d as 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c74e724f01614d8d91ccc261bbcf2f8d 2024-12-06T03:50:09,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c74e724f01614d8d91ccc261bbcf2f8d, entries=7, sequenceid=302, filesize=12.2 K 2024-12-06T03:50:09,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 9137d99742c4c20a6bcf5013532694f1 in 25ms, sequenceid=302, compaction requested=true 2024-12-06T03:50:09,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:50:09,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9137d99742c4c20a6bcf5013532694f1:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T03:50:09,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:50:09,550 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T03:50:09,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:50:09,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-06T03:50:09,552 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 198398 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T03:50:09,552 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1541): 9137d99742c4c20a6bcf5013532694f1/info is initiating minor compaction (all files) 2024-12-06T03:50:09,552 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9137d99742c4c20a6bcf5013532694f1/info in TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 
2024-12-06T03:50:09,552 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/60e7ccbffd94426498af630cdd31276c, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/fa744eb93cbe4262a95ffc46fbc560f6, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c74e724f01614d8d91ccc261bbcf2f8d] into tmpdir=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp, totalSize=193.7 K 2024-12-06T03:50:09,553 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting 60e7ccbffd94426498af630cdd31276c, keycount=144, bloomtype=ROW, size=157.7 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733456977026 2024-12-06T03:50:09,553 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting fa744eb93cbe4262a95ffc46fbc560f6, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733456997413 2024-12-06T03:50:09,553 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] compactions.Compactor(225): Compacting c74e724f01614d8d91ccc261bbcf2f8d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1733457007513 2024-12-06T03:50:09,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/91e7438de1654e009e9858e0f5da6e7c is 1080, key is row0231/info:/1733457009527/Put/seqid=0 2024-12-06T03:50:09,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741873_1049 (size=19013) 2024-12-06T03:50:09,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741873_1049 (size=19013) 2024-12-06T03:50:09,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/91e7438de1654e009e9858e0f5da6e7c 2024-12-06T03:50:09,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/91e7438de1654e009e9858e0f5da6e7c as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/91e7438de1654e009e9858e0f5da6e7c 2024-12-06T03:50:09,571 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9137d99742c4c20a6bcf5013532694f1#info#compaction#87 average throughput is 57.81 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T03:50:09,572 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/a596d3237d854c9c9d7fb542a759f5ab is 1080, key is row0062/info:/1733456977026/Put/seqid=0 2024-12-06T03:50:09,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741874_1050 (size=188548) 2024-12-06T03:50:09,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741874_1050 (size=188548) 2024-12-06T03:50:09,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/91e7438de1654e009e9858e0f5da6e7c, entries=13, sequenceid=318, filesize=18.6 K 2024-12-06T03:50:09,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 9137d99742c4c20a6bcf5013532694f1 in 32ms, sequenceid=318, compaction requested=false 2024-12-06T03:50:09,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:50:09,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] regionserver.HRegion(8855): Flush requested on 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:50:09,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9137d99742c4c20a6bcf5013532694f1 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-06T03:50:09,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/7aa08d625f804f06bf5d0894bcd8c68c is 1080, key is row0244/info:/1733457009552/Put/seqid=0 2024-12-06T03:50:09,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741875_1051 (size=19013) 2024-12-06T03:50:09,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741875_1051 (size=19013) 2024-12-06T03:50:09,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/7aa08d625f804f06bf5d0894bcd8c68c 2024-12-06T03:50:09,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/7aa08d625f804f06bf5d0894bcd8c68c as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/7aa08d625f804f06bf5d0894bcd8c68c 2024-12-06T03:50:09,609 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/7aa08d625f804f06bf5d0894bcd8c68c, entries=13, sequenceid=334, filesize=18.6 K 2024-12-06T03:50:09,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for 9137d99742c4c20a6bcf5013532694f1 in 25ms, sequenceid=334, compaction requested=false 2024-12-06T03:50:09,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:50:09,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:09,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:09,985 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/.tmp/info/a596d3237d854c9c9d7fb542a759f5ab as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/a596d3237d854c9c9d7fb542a759f5ab 2024-12-06T03:50:09,990 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9137d99742c4c20a6bcf5013532694f1/info of 9137d99742c4c20a6bcf5013532694f1 into a596d3237d854c9c9d7fb542a759f5ab(size=184.1 K), total size for store is 221.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T03:50:09,990 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:50:09,991 INFO [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., storeName=9137d99742c4c20a6bcf5013532694f1/info, priority=13, startTime=1733457009550; duration=0sec 2024-12-06T03:50:09,991 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T03:50:09,991 DEBUG [RS:0;6f1b912b0816:33823-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9137d99742c4c20a6bcf5013532694f1:info 2024-12-06T03:50:10,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:50:10,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:50:11,586 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-06T03:50:11,586 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C33823%2C1733456953577.1733457011586 2024-12-06T03:50:11,606 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,607 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,607 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,607 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,607 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,607 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577/6f1b912b0816%2C33823%2C1733456953577.1733456954078 with entries=314, filesize=309.13 KB; new WAL /user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577/6f1b912b0816%2C33823%2C1733456953577.1733457011586 2024-12-06T03:50:11,610 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39135:39135),(127.0.0.1/127.0.0.1:35419:35419)] 2024-12-06T03:50:11,610 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577/6f1b912b0816%2C33823%2C1733456953577.1733456954078 is not closed yet, will try archiving it next time 2024-12-06T03:50:11,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741833_1009 (size=316559) 2024-12-06T03:50:11,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741833_1009 (size=316559) 2024-12-06T03:50:11,611 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/WALs/6f1b912b0816,33823,1733456953577/6f1b912b0816%2C33823%2C1733456953577.1733456954078 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/oldWALs/6f1b912b0816%2C33823%2C1733456953577.1733456954078 2024-12-06T03:50:11,613 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-06T03:50:11,617 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/info/797add9c261749b8b602c20a78f790ed is 186, key is TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3./info:regioninfo/1733456979847/Put/seqid=0 2024-12-06T03:50:11,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741877_1053 (size=6153) 2024-12-06T03:50:11,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741877_1053 (size=6153) 2024-12-06T03:50:11,622 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/info/797add9c261749b8b602c20a78f790ed 2024-12-06T03:50:11,627 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/.tmp/info/797add9c261749b8b602c20a78f790ed as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/info/797add9c261749b8b602c20a78f790ed 2024-12-06T03:50:11,632 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/info/797add9c261749b8b602c20a78f790ed, entries=5, sequenceid=21, filesize=6.0 K 2024-12-06T03:50:11,633 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-12-06T03:50:11,633 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-06T03:50:11,633 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 9137d99742c4c20a6bcf5013532694f1: 2024-12-06T03:50:11,633 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 64e8b1893dc9f355346e3f21b27481c3: 2024-12-06T03:50:11,634 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T03:50:11,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T03:50:11,634 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-06T03:50:11,634 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:50:11,634 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:11,634 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:11,634 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-06T03:50:11,635 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T03:50:11,635 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=605798265, stopped=false 2024-12-06T03:50:11,635 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6f1b912b0816,33419,1733456953434 2024-12-06T03:50:11,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:50:11,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:50:11,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:50:11,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:11,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:11,693 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:50:11,694 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:50:11,694 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:50:11,694 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T03:50:11,694 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:50:11,694 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:11,694 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6f1b912b0816,33823,1733456953577' ***** 2024-12-06T03:50:11,694 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T03:50:11,695 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(3091): Received CLOSE for 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(3091): Received CLOSE for 64e8b1893dc9f355346e3f21b27481c3 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(959): stopping server 6f1b912b0816,33823,1733456953577 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6f1b912b0816:33823. 
2024-12-06T03:50:11,695 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9137d99742c4c20a6bcf5013532694f1, disabling compactions & flushes 2024-12-06T03:50:11,695 DEBUG [RS:0;6f1b912b0816:33823 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:50:11,695 DEBUG [RS:0;6f1b912b0816:33823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:11,695 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:50:11,695 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:50:11,695 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. after waiting 0 ms 2024-12-06T03:50:11,695 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-06T03:50:11,695 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T03:50:11,696 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-06T03:50:11,696 DEBUG [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 9137d99742c4c20a6bcf5013532694f1=TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1., 64e8b1893dc9f355346e3f21b27481c3=TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3.} 2024-12-06T03:50:11,696 DEBUG [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 64e8b1893dc9f355346e3f21b27481c3, 9137d99742c4c20a6bcf5013532694f1 2024-12-06T03:50:11,696 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:50:11,696 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:50:11,696 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:50:11,696 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:50:11,696 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:50:11,696 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d->hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38-top, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-f349df77c76c4697be056fa5e9133583, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-de2e2723042b4290877cb982e6947d98, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/18c98f4d63e54c1cb26eb961ef1cf187, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-2fb1865141aa4862a15a1d091b9ab214, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c9cd7ecd2a564b599bfc8777ae875027, 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c6b0cb981b2e45b7bcd2693dc9cde2d4, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e6ced80066ef44adb6979a93b1ab8d14, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5cdb1eef59f14b3ba91b4aa7a4fd28de, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5b14a02f62c34bb0bdc938c3718b17bc, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/f1f46ad690534d7ca2d7218dca11aaed, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/975276476c574038b04a57a9afdc2fdc, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/ab46e8e3073444a38e143183fbb9e562, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/70fd98ac6e614c2b945b9a0ff09089ab, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/7ae0c57238be40198589395e94e219a7, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e9e17b817b2341ec94d2e241ebb0e4b9, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/6266d7a039444cc0a1a53eb75a07c369, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9c6543d947dd4fa1959806a3e6fd03b1, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/60e7ccbffd94426498af630cdd31276c, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/a8aec31ee0284c898ed564746af03773, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/fa744eb93cbe4262a95ffc46fbc560f6, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c74e724f01614d8d91ccc261bbcf2f8d] to archive 2024-12-06T03:50:11,697 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T03:50:11,698 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:50:11,699 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-f349df77c76c4697be056fa5e9133583 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-f349df77c76c4697be056fa5e9133583 2024-12-06T03:50:11,700 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-de2e2723042b4290877cb982e6947d98 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-de2e2723042b4290877cb982e6947d98 2024-12-06T03:50:11,701 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/18c98f4d63e54c1cb26eb961ef1cf187 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/18c98f4d63e54c1cb26eb961ef1cf187 2024-12-06T03:50:11,702 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-06T03:50:11,702 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:50:11,702 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:50:11,702 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-2fb1865141aa4862a15a1d091b9ab214 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/TestLogRolling-testLogRolling=6737d9a3b3f50844292ae9b1e1e8e03d-2fb1865141aa4862a15a1d091b9ab214 2024-12-06T03:50:11,702 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733457011696Running coprocessor pre-close hooks at 1733457011696Disabling compacts and flushes for region at 1733457011696Disabling writes for close at 1733457011696Writing region close event to WAL at 1733457011699 (+3 ms)Running coprocessor post-close hooks at 1733457011702 (+3 ms)Closed at 1733457011702 2024-12-06T03:50:11,703 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T03:50:11,704 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c9cd7ecd2a564b599bfc8777ae875027 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c9cd7ecd2a564b599bfc8777ae875027 2024-12-06T03:50:11,705 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c6b0cb981b2e45b7bcd2693dc9cde2d4 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c6b0cb981b2e45b7bcd2693dc9cde2d4 2024-12-06T03:50:11,705 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e6ced80066ef44adb6979a93b1ab8d14 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e6ced80066ef44adb6979a93b1ab8d14 2024-12-06T03:50:11,706 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5cdb1eef59f14b3ba91b4aa7a4fd28de to 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5cdb1eef59f14b3ba91b4aa7a4fd28de 2024-12-06T03:50:11,707 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5b14a02f62c34bb0bdc938c3718b17bc to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/5b14a02f62c34bb0bdc938c3718b17bc 2024-12-06T03:50:11,709 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/f1f46ad690534d7ca2d7218dca11aaed to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/f1f46ad690534d7ca2d7218dca11aaed 2024-12-06T03:50:11,710 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/975276476c574038b04a57a9afdc2fdc to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/975276476c574038b04a57a9afdc2fdc 2024-12-06T03:50:11,713 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/ab46e8e3073444a38e143183fbb9e562 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/ab46e8e3073444a38e143183fbb9e562 2024-12-06T03:50:11,714 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/70fd98ac6e614c2b945b9a0ff09089ab to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/70fd98ac6e614c2b945b9a0ff09089ab 2024-12-06T03:50:11,715 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/7ae0c57238be40198589395e94e219a7 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/7ae0c57238be40198589395e94e219a7 2024-12-06T03:50:11,716 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e9e17b817b2341ec94d2e241ebb0e4b9 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/e9e17b817b2341ec94d2e241ebb0e4b9 2024-12-06T03:50:11,717 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/6266d7a039444cc0a1a53eb75a07c369 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/6266d7a039444cc0a1a53eb75a07c369 2024-12-06T03:50:11,718 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9c6543d947dd4fa1959806a3e6fd03b1 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/9c6543d947dd4fa1959806a3e6fd03b1 2024-12-06T03:50:11,719 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/60e7ccbffd94426498af630cdd31276c to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/60e7ccbffd94426498af630cdd31276c 2024-12-06T03:50:11,720 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/a8aec31ee0284c898ed564746af03773 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/a8aec31ee0284c898ed564746af03773 2024-12-06T03:50:11,721 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/fa744eb93cbe4262a95ffc46fbc560f6 to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/fa744eb93cbe4262a95ffc46fbc560f6 2024-12-06T03:50:11,722 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c74e724f01614d8d91ccc261bbcf2f8d to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/info/c74e724f01614d8d91ccc261bbcf2f8d 2024-12-06T03:50:11,723 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=6f1b912b0816:33419 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-06T03:50:11,723 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [18c98f4d63e54c1cb26eb961ef1cf187=43081, c9cd7ecd2a564b599bfc8777ae875027=12516, c6b0cb981b2e45b7bcd2693dc9cde2d4=63733, e6ced80066ef44adb6979a93b1ab8d14=17906, 5cdb1eef59f14b3ba91b4aa7a4fd28de=17906, 5b14a02f62c34bb0bdc938c3718b17bc=84390, f1f46ad690534d7ca2d7218dca11aaed=12516, 975276476c574038b04a57a9afdc2fdc=17906, ab46e8e3073444a38e143183fbb9e562=112528, 70fd98ac6e614c2b945b9a0ff09089ab=20078, 7ae0c57238be40198589395e94e219a7=12516, e9e17b817b2341ec94d2e241ebb0e4b9=135400, 6266d7a039444cc0a1a53eb75a07c369=20078, 9c6543d947dd4fa1959806a3e6fd03b1=17906, 60e7ccbffd94426498af630cdd31276c=161463, a8aec31ee0284c898ed564746af03773=17918, fa744eb93cbe4262a95ffc46fbc560f6=24412, c74e724f01614d8d91ccc261bbcf2f8d=12523] 2024-12-06T03:50:11,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:50:11,732 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/9137d99742c4c20a6bcf5013532694f1/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=126 2024-12-06T03:50:11,733 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:50:11,733 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9137d99742c4c20a6bcf5013532694f1: Waiting for close lock at 1733457011695Running coprocessor pre-close hooks at 1733457011695Disabling compacts and flushes for region at 1733457011695Disabling writes for close at 1733457011695Writing region close event to WAL at 1733457011728 (+33 ms)Running coprocessor post-close hooks at 1733457011733 (+5 ms)Closed at 1733457011733 2024-12-06T03:50:11,733 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733456979125.9137d99742c4c20a6bcf5013532694f1. 2024-12-06T03:50:11,733 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 64e8b1893dc9f355346e3f21b27481c3, disabling compactions & flushes 2024-12-06T03:50:11,733 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 2024-12-06T03:50:11,733 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 2024-12-06T03:50:11,733 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. after waiting 0 ms 2024-12-06T03:50:11,733 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 2024-12-06T03:50:11,734 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d->hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/6737d9a3b3f50844292ae9b1e1e8e03d/info/9e8300e342d1471694c32f5e1cb49c38-bottom] to archive 2024-12-06T03:50:11,735 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T03:50:11,736 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d to hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/archive/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/info/9e8300e342d1471694c32f5e1cb49c38.6737d9a3b3f50844292ae9b1e1e8e03d 2024-12-06T03:50:11,736 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-06T03:50:11,740 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/data/default/TestLogRolling-testLogRolling/64e8b1893dc9f355346e3f21b27481c3/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-12-06T03:50:11,741 INFO [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 2024-12-06T03:50:11,741 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 64e8b1893dc9f355346e3f21b27481c3: Waiting for close lock at 1733457011733Running coprocessor pre-close hooks at 1733457011733Disabling compacts and flushes for region at 1733457011733Disabling writes for close at 1733457011733Writing region close event to WAL at 1733457011737 (+4 ms)Running coprocessor post-close hooks at 1733457011741 (+4 ms)Closed at 1733457011741 2024-12-06T03:50:11,741 DEBUG [RS_CLOSE_REGION-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733456979125.64e8b1893dc9f355346e3f21b27481c3. 2024-12-06T03:50:11,896 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(976): stopping server 6f1b912b0816,33823,1733456953577; all regions closed. 
2024-12-06T03:50:11,896 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,896 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,897 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,897 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,897 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741834_1010 (size=8107) 2024-12-06T03:50:11,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741834_1010 (size=8107) 2024-12-06T03:50:11,903 DEBUG [RS:0;6f1b912b0816:33823 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/oldWALs 2024-12-06T03:50:11,903 INFO [RS:0;6f1b912b0816:33823 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C33823%2C1733456953577.meta:.meta(num 1733456954693) 2024-12-06T03:50:11,903 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,903 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,903 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,903 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,903 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:11,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741876_1052 (size=780) 2024-12-06T03:50:11,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741876_1052 (size=780) 2024-12-06T03:50:11,907 DEBUG [RS:0;6f1b912b0816:33823 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/oldWALs 2024-12-06T03:50:11,907 INFO [RS:0;6f1b912b0816:33823 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C33823%2C1733456953577:(num 1733457011586) 2024-12-06T03:50:11,907 DEBUG [RS:0;6f1b912b0816:33823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:11,907 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:50:11,907 INFO [RS:0;6f1b912b0816:33823 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:50:11,908 INFO [RS:0;6f1b912b0816:33823 {}] hbase.ChoreService(370): Chore service for: regionserver/6f1b912b0816:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T03:50:11,908 INFO [RS:0;6f1b912b0816:33823 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:50:11,908 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-06T03:50:11,908 INFO [RS:0;6f1b912b0816:33823 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33823 2024-12-06T03:50:11,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6f1b912b0816,33823,1733456953577 2024-12-06T03:50:11,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:50:11,935 INFO [RS:0;6f1b912b0816:33823 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:50:11,943 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6f1b912b0816,33823,1733456953577] 2024-12-06T03:50:11,951 INFO [regionserver/6f1b912b0816:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:50:11,951 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6f1b912b0816,33823,1733456953577 already deleted, retry=false 2024-12-06T03:50:11,951 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6f1b912b0816,33823,1733456953577 expired; onlineServers=0 2024-12-06T03:50:11,951 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6f1b912b0816,33419,1733456953434' ***** 2024-12-06T03:50:11,951 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T03:50:11,951 INFO [M:0;6f1b912b0816:33419 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:50:11,952 INFO [M:0;6f1b912b0816:33419 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:50:11,952 DEBUG [M:0;6f1b912b0816:33419 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T03:50:11,952 DEBUG [M:0;6f1b912b0816:33419 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T03:50:11,952 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T03:50:11,952 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456953883 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733456953883,5,FailOnTimeoutGroup] 2024-12-06T03:50:11,952 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456953883 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733456953883,5,FailOnTimeoutGroup] 2024-12-06T03:50:11,952 INFO [M:0;6f1b912b0816:33419 {}] hbase.ChoreService(370): Chore service for: master/6f1b912b0816:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T03:50:11,952 INFO [M:0;6f1b912b0816:33419 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:50:11,952 DEBUG [M:0;6f1b912b0816:33419 {}] master.HMaster(1795): Stopping service threads 2024-12-06T03:50:11,952 INFO [M:0;6f1b912b0816:33419 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T03:50:11,952 INFO [M:0;6f1b912b0816:33419 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:50:11,952 INFO [M:0;6f1b912b0816:33419 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T03:50:11,953 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T03:50:11,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T03:50:11,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:11,960 DEBUG [M:0;6f1b912b0816:33419 {}] zookeeper.ZKUtil(347): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T03:50:11,960 WARN [M:0;6f1b912b0816:33419 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T03:50:11,960 INFO [M:0;6f1b912b0816:33419 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/.lastflushedseqids 2024-12-06T03:50:11,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741878_1054 (size=228) 2024-12-06T03:50:11,966 INFO [M:0;6f1b912b0816:33419 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T03:50:11,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741878_1054 (size=228) 2024-12-06T03:50:11,966 INFO [M:0;6f1b912b0816:33419 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T03:50:11,966 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:50:11,966 INFO [M:0;6f1b912b0816:33419 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:11,966 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:11,966 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:50:11,966 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:11,966 INFO [M:0;6f1b912b0816:33419 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-12-06T03:50:11,980 DEBUG [M:0;6f1b912b0816:33419 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f43800b88ac04e9cb43dfdb1964c2939 is 82, key is hbase:meta,,1/info:regioninfo/1733456954731/Put/seqid=0 2024-12-06T03:50:11,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741879_1055 (size=5672) 2024-12-06T03:50:11,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741879_1055 (size=5672) 2024-12-06T03:50:11,985 INFO [M:0;6f1b912b0816:33419 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f43800b88ac04e9cb43dfdb1964c2939 2024-12-06T03:50:12,008 DEBUG [M:0;6f1b912b0816:33419 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0198f508088d44308ef38984f695112a is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733456955167/Put/seqid=0 2024-12-06T03:50:12,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741880_1056 (size=7090) 2024-12-06T03:50:12,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741880_1056 (size=7090) 2024-12-06T03:50:12,013 INFO [M:0;6f1b912b0816:33419 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0198f508088d44308ef38984f695112a 2024-12-06T03:50:12,017 INFO [M:0;6f1b912b0816:33419 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0198f508088d44308ef38984f695112a 2024-12-06T03:50:12,031 DEBUG [M:0;6f1b912b0816:33419 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/107c0b1d9ddf4b8b9a990bb11ba5020d is 69, key is 6f1b912b0816,33823,1733456953577/rs:state/1733456953929/Put/seqid=0 
2024-12-06T03:50:12,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741881_1057 (size=5156) 2024-12-06T03:50:12,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741881_1057 (size=5156) 2024-12-06T03:50:12,039 INFO [M:0;6f1b912b0816:33419 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/107c0b1d9ddf4b8b9a990bb11ba5020d 2024-12-06T03:50:12,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:50:12,043 INFO [RS:0;6f1b912b0816:33823 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:50:12,043 INFO [RS:0;6f1b912b0816:33823 {}] regionserver.HRegionServer(1031): Exiting; stopping=6f1b912b0816,33823,1733456953577; zookeeper connection closed. 2024-12-06T03:50:12,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33823-0x101aa0b93120001, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:50:12,044 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f444f2c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f444f2c 2024-12-06T03:50:12,044 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T03:50:12,056 DEBUG [M:0;6f1b912b0816:33419 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7b5cf9268a1e4d2694edb4719a561e4f is 52, key is load_balancer_on/state:d/1733456954805/Put/seqid=0 2024-12-06T03:50:12,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741882_1058 (size=5056) 2024-12-06T03:50:12,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741882_1058 (size=5056) 2024-12-06T03:50:12,065 INFO [M:0;6f1b912b0816:33419 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7b5cf9268a1e4d2694edb4719a561e4f 2024-12-06T03:50:12,069 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f43800b88ac04e9cb43dfdb1964c2939 as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f43800b88ac04e9cb43dfdb1964c2939 2024-12-06T03:50:12,073 INFO [M:0;6f1b912b0816:33419 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f43800b88ac04e9cb43dfdb1964c2939, entries=8, sequenceid=125, filesize=5.5 K 2024-12-06T03:50:12,074 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0198f508088d44308ef38984f695112a as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0198f508088d44308ef38984f695112a 2024-12-06T03:50:12,078 INFO [M:0;6f1b912b0816:33419 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0198f508088d44308ef38984f695112a 2024-12-06T03:50:12,078 INFO [M:0;6f1b912b0816:33419 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0198f508088d44308ef38984f695112a, entries=13, sequenceid=125, filesize=6.9 K 2024-12-06T03:50:12,079 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/107c0b1d9ddf4b8b9a990bb11ba5020d as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/107c0b1d9ddf4b8b9a990bb11ba5020d 2024-12-06T03:50:12,084 INFO [M:0;6f1b912b0816:33419 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/107c0b1d9ddf4b8b9a990bb11ba5020d, entries=1, sequenceid=125, filesize=5.0 K 2024-12-06T03:50:12,085 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7b5cf9268a1e4d2694edb4719a561e4f as hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7b5cf9268a1e4d2694edb4719a561e4f 2024-12-06T03:50:12,089 INFO [M:0;6f1b912b0816:33419 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37527/user/jenkins/test-data/b09222f5-1233-bc52-b8bf-66df979a4d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7b5cf9268a1e4d2694edb4719a561e4f, entries=1, sequenceid=125, filesize=4.9 K 2024-12-06T03:50:12,090 INFO [M:0;6f1b912b0816:33419 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=125, compaction requested=false 2024-12-06T03:50:12,091 INFO [M:0;6f1b912b0816:33419 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T03:50:12,092 DEBUG [M:0;6f1b912b0816:33419 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733457011966Disabling compacts and flushes for region at 1733457011966Disabling writes for close at 1733457011966Obtaining lock to block concurrent updates at 1733457011966Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733457011966Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1733457011967 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733457011967Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733457011967Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733457011980 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733457011980Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733457011989 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733457012007 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733457012008 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733457012018 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733457012031 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733457012031Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733457012043 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733457012055 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733457012056 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@120954a7: reopening flushed file at 1733457012068 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7459418: reopening flushed file at 1733457012073 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c57ca77: reopening flushed file at 1733457012078 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6159cbf5: reopening flushed file at 1733457012084 (+6 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=125, compaction requested=false at 1733457012090 (+6 ms)Writing region close event to WAL at 1733457012091 (+1 ms)Closed at 1733457012091 2024-12-06T03:50:12,094 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:12,094 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:12,094 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:12,094 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:12,094 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:12,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35517 is added to blk_1073741830_1006 (size=61320) 2024-12-06T03:50:12,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39961 is added to blk_1073741830_1006 (size=61320) 2024-12-06T03:50:12,102 INFO [M:0;6f1b912b0816:33419 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-06T03:50:12,102 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:50:12,102 INFO [M:0;6f1b912b0816:33419 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33419 2024-12-06T03:50:12,102 INFO [M:0;6f1b912b0816:33419 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:50:12,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:50:12,225 INFO [M:0;6f1b912b0816:33419 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:50:12,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33419-0x101aa0b93120000, quorum=127.0.0.1:64235, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:50:12,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@311e029{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:50:12,265 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78e8d841{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:50:12,265 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:50:12,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c6d9015{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:50:12,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@446d58fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.log.dir/,STOPPED} 2024-12-06T03:50:12,267 WARN [BP-893510456-172.17.0.2-1733456951750 heartbeating to localhost/127.0.0.1:37527 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:50:12,267 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:50:12,267 WARN [BP-893510456-172.17.0.2-1733456951750 heartbeating to localhost/127.0.0.1:37527 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-893510456-172.17.0.2-1733456951750 (Datanode Uuid fec8d7bc-7d4a-47fc-a8d5-8a5734f3ff80) service to localhost/127.0.0.1:37527 2024-12-06T03:50:12,267 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:50:12,268 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/data/data3/current/BP-893510456-172.17.0.2-1733456951750 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:50:12,268 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/data/data4/current/BP-893510456-172.17.0.2-1733456951750 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:50:12,268 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:50:12,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26c07cbd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:50:12,272 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6daf3b21{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:50:12,272 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:50:12,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10eef1f0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:50:12,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@500bd0b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.log.dir/,STOPPED} 2024-12-06T03:50:12,276 WARN [BP-893510456-172.17.0.2-1733456951750 heartbeating to localhost/127.0.0.1:37527 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:50:12,276 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:50:12,276 WARN [BP-893510456-172.17.0.2-1733456951750 heartbeating to localhost/127.0.0.1:37527 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-893510456-172.17.0.2-1733456951750 (Datanode Uuid eb089f06-7085-48dd-8873-ee2654e36b3b) service to localhost/127.0.0.1:37527 2024-12-06T03:50:12,276 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:50:12,276 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/data/data1/current/BP-893510456-172.17.0.2-1733456951750 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:50:12,277 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/cluster_aa788da9-02f8-ba50-9e4b-e7382a0ccdc1/data/data2/current/BP-893510456-172.17.0.2-1733456951750 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:50:12,277 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:50:12,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@549b308b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:50:12,284 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@516aaa5c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:50:12,284 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:50:12,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a75c30e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:50:12,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50c11e4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.log.dir/,STOPPED} 2024-12-06T03:50:12,290 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T03:50:12,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T03:50:12,325 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 206) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37527 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37527 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37527 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37527 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37527 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:37527 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37527 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37527 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=159 (was 142) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6956 (was 7440) 2024-12-06T03:50:12,332 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=159, ProcessCount=11, AvailableMemoryMB=6956 2024-12-06T03:50:12,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T03:50:12,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.log.dir so I do NOT create it in target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b 2024-12-06T03:50:12,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/70f1fb86-ff77-6053-3b41-3da21d0f6074/hadoop.tmp.dir so I do NOT create it in target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b 2024-12-06T03:50:12,333 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9, deleteOnExit=true 2024-12-06T03:50:12,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-06T03:50:12,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/test.cache.data in system properties and HBase conf 2024-12-06T03:50:12,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T03:50:12,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/hadoop.log.dir in system properties and HBase conf 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-06T03:50:12,334 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T03:50:12,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T03:50:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T03:50:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:50:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T03:50:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/nfs.dump.dir in system properties and HBase conf 2024-12-06T03:50:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/java.io.tmpdir in system properties and HBase conf 2024-12-06T03:50:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T03:50:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T03:50:12,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T03:50:12,351 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:50:12,622 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:50:12,625 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:50:12,627 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:50:12,627 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:50:12,627 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T03:50:12,627 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:50:12,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@243c28a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:50:12,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@576e761c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:50:12,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:12,728 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49e8762f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/java.io.tmpdir/jetty-localhost-39851-hadoop-hdfs-3_4_1-tests_jar-_-any-3896299963617654570/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:50:12,729 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c6fbb1f{HTTP/1.1, (http/1.1)}{localhost:39851} 2024-12-06T03:50:12,729 INFO [Time-limited test {}] server.Server(415): Started @305594ms 2024-12-06T03:50:12,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:12,741 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T03:50:12,947 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:50:12,949 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:50:12,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:50:12,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:50:12,951 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:50:12,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30560100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:50:12,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@646f6454{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:50:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:50:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T03:50:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T03:50:12,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-06T03:50:13,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ac97d9e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/java.io.tmpdir/jetty-localhost-45395-hadoop-hdfs-3_4_1-tests_jar-_-any-831712427393965194/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:50:13,053 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f50471b{HTTP/1.1, (http/1.1)}{localhost:45395} 2024-12-06T03:50:13,053 INFO [Time-limited test {}] server.Server(415): Started @305918ms 2024-12-06T03:50:13,054 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:50:13,086 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T03:50:13,089 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T03:50:13,090 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T03:50:13,090 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T03:50:13,090 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T03:50:13,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11777c89{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/hadoop.log.dir/,AVAILABLE} 2024-12-06T03:50:13,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45f7f9ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T03:50:13,190 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1e8a55ee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/java.io.tmpdir/jetty-localhost-38285-hadoop-hdfs-3_4_1-tests_jar-_-any-12783938461840526229/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:50:13,190 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c21148f{HTTP/1.1, (http/1.1)}{localhost:38285} 2024-12-06T03:50:13,190 INFO [Time-limited test {}] server.Server(415): Started @306055ms 2024-12-06T03:50:13,191 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T03:50:13,539 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=9, created chunk count=9, reused chunk count=71, reuseRatio=88.75% 2024-12-06T03:50:13,540 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-06T03:50:13,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:13,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:13,759 WARN [Thread-2462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/data/data1/current/BP-11654519-172.17.0.2-1733457012354/current, will proceed with Du for space computation calculation, 2024-12-06T03:50:13,759 WARN [Thread-2463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/data/data2/current/BP-11654519-172.17.0.2-1733457012354/current, will proceed with Du for space computation calculation, 2024-12-06T03:50:13,781 WARN [Thread-2427 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:50:13,783 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc89fcd9a568fef4e with lease ID 0x5baaa4d983dee6ad: Processing first storage report for DS-a51b08fa-e3c0-4aba-bf03-0d69b5db39d3 from datanode DatanodeRegistration(127.0.0.1:44119, datanodeUuid=b5d3a403-b858-4dcd-be6f-0c702cf92f22, infoPort=38417, infoSecurePort=0, ipcPort=42911, storageInfo=lv=-57;cid=testClusterID;nsid=1620080178;c=1733457012354) 2024-12-06T03:50:13,783 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc89fcd9a568fef4e with lease ID 0x5baaa4d983dee6ad: from storage DS-a51b08fa-e3c0-4aba-bf03-0d69b5db39d3 node DatanodeRegistration(127.0.0.1:44119, datanodeUuid=b5d3a403-b858-4dcd-be6f-0c702cf92f22, infoPort=38417, infoSecurePort=0, ipcPort=42911, storageInfo=lv=-57;cid=testClusterID;nsid=1620080178;c=1733457012354), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:50:13,783 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc89fcd9a568fef4e with lease ID 0x5baaa4d983dee6ad: Processing first storage report for DS-a90f2310-903f-4f3c-b1f3-76a789044e64 from datanode DatanodeRegistration(127.0.0.1:44119, datanodeUuid=b5d3a403-b858-4dcd-be6f-0c702cf92f22, infoPort=38417, infoSecurePort=0, ipcPort=42911, storageInfo=lv=-57;cid=testClusterID;nsid=1620080178;c=1733457012354) 2024-12-06T03:50:13,783 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc89fcd9a568fef4e with lease ID 0x5baaa4d983dee6ad: from storage DS-a90f2310-903f-4f3c-b1f3-76a789044e64 node DatanodeRegistration(127.0.0.1:44119, datanodeUuid=b5d3a403-b858-4dcd-be6f-0c702cf92f22, infoPort=38417, infoSecurePort=0, ipcPort=42911, storageInfo=lv=-57;cid=testClusterID;nsid=1620080178;c=1733457012354), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:50:13,956 WARN [Thread-2474 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/data/data3/current/BP-11654519-172.17.0.2-1733457012354/current, will proceed with Du for space computation calculation, 2024-12-06T03:50:13,956 WARN [Thread-2475 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/data/data4/current/BP-11654519-172.17.0.2-1733457012354/current, will proceed with Du for space computation calculation, 2024-12-06T03:50:13,976 WARN [Thread-2450 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T03:50:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9598c871facd97e6 with lease ID 0x5baaa4d983dee6ae: Processing first storage report for DS-1c6dc1b2-64e1-4b0e-9462-5dbf6d3a23f7 from datanode DatanodeRegistration(127.0.0.1:46079, datanodeUuid=c7613104-e3d2-4b14-99d0-6b4bba1b2963, infoPort=41147, infoSecurePort=0, ipcPort=41505, storageInfo=lv=-57;cid=testClusterID;nsid=1620080178;c=1733457012354) 2024-12-06T03:50:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9598c871facd97e6 with lease ID 0x5baaa4d983dee6ae: from storage DS-1c6dc1b2-64e1-4b0e-9462-5dbf6d3a23f7 node DatanodeRegistration(127.0.0.1:46079, datanodeUuid=c7613104-e3d2-4b14-99d0-6b4bba1b2963, infoPort=41147, infoSecurePort=0, ipcPort=41505, storageInfo=lv=-57;cid=testClusterID;nsid=1620080178;c=1733457012354), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:50:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9598c871facd97e6 with lease ID 0x5baaa4d983dee6ae: Processing first storage report for DS-7b62a717-53c5-492f-82ea-f3c833b7c62a from datanode DatanodeRegistration(127.0.0.1:46079, datanodeUuid=c7613104-e3d2-4b14-99d0-6b4bba1b2963, infoPort=41147, infoSecurePort=0, ipcPort=41505, storageInfo=lv=-57;cid=testClusterID;nsid=1620080178;c=1733457012354) 2024-12-06T03:50:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9598c871facd97e6 with lease ID 0x5baaa4d983dee6ae: from storage DS-7b62a717-53c5-492f-82ea-f3c833b7c62a node DatanodeRegistration(127.0.0.1:46079, datanodeUuid=c7613104-e3d2-4b14-99d0-6b4bba1b2963, infoPort=41147, infoSecurePort=0, ipcPort=41505, storageInfo=lv=-57;cid=testClusterID;nsid=1620080178;c=1733457012354), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T03:50:14,018 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b 2024-12-06T03:50:14,024 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/zookeeper_0, clientPort=54196, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T03:50:14,025 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54196 2024-12-06T03:50:14,025 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:50:14,026 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:50:14,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:50:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741825_1001 (size=7) 2024-12-06T03:50:14,037 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8 with version=8 2024-12-06T03:50:14,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37783/user/jenkins/test-data/734c507d-fac7-0a53-9acc-5982cc68808a/hbase-staging 2024-12-06T03:50:14,039 INFO [Time-limited test {}] client.ConnectionUtils(128): master/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:50:14,039 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:50:14,039 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:50:14,039 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:50:14,039 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:50:14,039 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:50:14,039 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-06T03:50:14,039 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:50:14,040 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38057 2024-12-06T03:50:14,040 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38057 connecting to ZooKeeper ensemble=127.0.0.1:54196 2024-12-06T03:50:14,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:380570x0, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:50:14,094 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38057-0x101aa0c7fcc0000 connected 2024-12-06T03:50:14,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:50:14,161 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:50:14,163 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:50:14,163 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8, hbase.cluster.distributed=false 2024-12-06T03:50:14,164 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:50:14,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38057 2024-12-06T03:50:14,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38057 2024-12-06T03:50:14,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38057 2024-12-06T03:50:14,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38057 2024-12-06T03:50:14,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38057 2024-12-06T03:50:14,178 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/6f1b912b0816:0 server-side Connection retries=45 2024-12-06T03:50:14,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:50:14,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T03:50:14,178 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T03:50:14,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T03:50:14,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T03:50:14,178 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T03:50:14,179 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T03:50:14,179 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44033 2024-12-06T03:50:14,180 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44033 connecting to ZooKeeper ensemble=127.0.0.1:54196 2024-12-06T03:50:14,180 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:50:14,182 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:50:14,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440330x0, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T03:50:14,193 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:440330x0, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:50:14,193 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44033-0x101aa0c7fcc0001 connected 2024-12-06T03:50:14,193 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T03:50:14,194 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T03:50:14,194 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T03:50:14,195 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T03:50:14,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44033 2024-12-06T03:50:14,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44033 2024-12-06T03:50:14,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44033 2024-12-06T03:50:14,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44033 2024-12-06T03:50:14,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44033 2024-12-06T03:50:14,215 
DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6f1b912b0816:38057 2024-12-06T03:50:14,215 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/6f1b912b0816,38057,1733457014038 2024-12-06T03:50:14,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:50:14,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:50:14,226 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6f1b912b0816,38057,1733457014038 2024-12-06T03:50:14,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T03:50:14,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,235 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T03:50:14,235 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6f1b912b0816,38057,1733457014038 from backup master directory 2024-12-06T03:50:14,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6f1b912b0816,38057,1733457014038 2024-12-06T03:50:14,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:50:14,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T03:50:14,243 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
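
The entries above show the master registering an ephemeral znode under /hbase/backup-masters, watching /hbase/master, and deleting its backup-master entry once it wins the active-master race; both ZKWatcher instances see the resulting NodeCreated/NodeDeleted/NodeChildrenChanged events. The following is a minimal, illustrative sketch of that znode-plus-watcher pattern using the plain Apache ZooKeeper client rather than HBase's internal ZKWatcher/ZKUtil/RecoverableZooKeeper classes; the quorum address, paths, and server name are copied from the log, the code is not part of the test, and it assumes /hbase/backup-masters already exists as it would on a running cluster.

// --- illustrative sketch, not part of the test log ---
// Registers an ephemeral znode under /hbase/backup-masters and sets a one-shot watch on
// /hbase/master, mirroring the events logged above. Plain ZooKeeper client API only.
import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class BackupMasterZNodeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and znode path as they appear in the log entries above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54196", 30000,
        event -> System.out.println("ZK event: " + event.getType() + " " + event.getPath()));
    String znode = "/hbase/backup-masters/6f1b912b0816,38057,1733457014038";
    zk.create(znode, "".getBytes(StandardCharsets.UTF_8),
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); // ephemeral: removed when the session ends
    zk.exists("/hbase/master", true);                       // one-shot watch on the active-master znode
    zk.delete(znode, -1);                                   // the master deletes its entry once it is active
    zk.close();
  }
}
// --- end sketch ---
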
2024-12-06T03:50:14,243 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6f1b912b0816,38057,1733457014038 2024-12-06T03:50:14,246 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/hbase.id] with ID: 55b33d39-3ad5-4aa9-afec-09aff61c8e05 2024-12-06T03:50:14,246 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/.tmp/hbase.id 2024-12-06T03:50:14,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:50:14,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741826_1002 (size=42) 2024-12-06T03:50:14,252 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/.tmp/hbase.id]:[hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/hbase.id] 2024-12-06T03:50:14,261 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:50:14,261 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-06T03:50:14,262 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
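
In the cluster-ID entries above, FSUtils writes hbase.id to a temporary path under .tmp and then moves it to its final location, so a partially written file is never visible at the target path. Below is a minimal sketch of that write-then-rename pattern using the stock Hadoop FileSystem API; the NameNode address, directory, and UUID are copied from the log, but this is illustrative code, not the FSUtils implementation, and the real hbase.id content is an HBase-specific serialized ClusterId rather than the raw string used here.

// --- illustrative sketch, not part of the test log ---
// Write-to-temp-then-rename: the content only appears under the final name once the rename succeeds.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:33273");        // NameNode from the log
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8");
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {      // overwrite any stale temp file
      // placeholder payload; the real file holds a serialized ClusterId, not plain text
      out.write("55b33d39-3ad5-4aa9-afec-09aff61c8e05".getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {                             // single-namespace HDFS rename
      throw new java.io.IOException("Could not move " + tmp + " to " + target);
    }
  }
}
// --- end sketch ---
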
2024-12-06T03:50:14,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:50:14,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741827_1003 (size=196) 2024-12-06T03:50:14,274 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T03:50:14,275 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T03:50:14,276 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:50:14,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:50:14,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741828_1004 (size=1189) 2024-12-06T03:50:14,289 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store 2024-12-06T03:50:14,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:50:14,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741829_1005 (size=34) 2024-12-06T03:50:14,295 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:50:14,295 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:50:14,295 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:14,295 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:14,295 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:50:14,295 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:14,295 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
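
The master:store descriptor printed above declares four column families (info, proc, rs, state) with per-family versions, bloom filters, block sizes, and ROW_INDEX_V1 block encoding for info. The master builds this local region through internal MasterRegion code, but the same kind of descriptor can be expressed with the public hbase-client builder API; the sketch below mirrors only the info and proc families from the log and is illustrative, not the code path HBase actually runs.

// --- illustrative sketch, not part of the test log ---
// Declaring a descriptor with column families similar to master:store's 'info' and 'proc'
// using the public builder API from hbase-client.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setBlocksize(8192)                                  // BLOCKSIZE => 8 KB
            .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                   // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                   // BLOOMFILTER => 'ROW'
            .setBlocksize(65536)                                 // BLOCKSIZE => 64 KB
            .build())
        .build();
  }
}
// --- end sketch ---
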
2024-12-06T03:50:14,295 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733457014295Disabling compacts and flushes for region at 1733457014295Disabling writes for close at 1733457014295Writing region close event to WAL at 1733457014295Closed at 1733457014295 2024-12-06T03:50:14,295 WARN [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/.initializing 2024-12-06T03:50:14,296 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/WALs/6f1b912b0816,38057,1733457014038 2024-12-06T03:50:14,298 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C38057%2C1733457014038, suffix=, logDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/WALs/6f1b912b0816,38057,1733457014038, archiveDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/oldWALs, maxLogs=10 2024-12-06T03:50:14,298 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C38057%2C1733457014038.1733457014298 2024-12-06T03:50:14,302 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/WALs/6f1b912b0816,38057,1733457014038/6f1b912b0816%2C38057%2C1733457014038.1733457014298 2024-12-06T03:50:14,306 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41147:41147),(127.0.0.1/127.0.0.1:38417:38417)] 2024-12-06T03:50:14,310 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:50:14,310 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:50:14,310 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,310 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,312 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T03:50:14,313 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:14,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T03:50:14,315 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:50:14,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T03:50:14,316 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:50:14,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T03:50:14,317 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,318 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T03:50:14,318 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,318 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,319 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,319 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,320 DEBUG [master/6f1b912b0816:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,320 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T03:50:14,321 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T03:50:14,322 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:50:14,323 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837567, jitterRate=0.06502245366573334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T03:50:14,323 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733457014310Initializing all the Stores at 1733457014311 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733457014311Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733457014312 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733457014312Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733457014312Cleaning up temporary data from old regions at 1733457014320 (+8 ms)Region opened successfully at 1733457014323 (+3 ms) 2024-12-06T03:50:14,326 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T03:50:14,329 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dcdde12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:50:14,330 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-06T03:50:14,330 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T03:50:14,330 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T03:50:14,331 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T03:50:14,331 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T03:50:14,332 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-06T03:50:14,332 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T03:50:14,334 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T03:50:14,335 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T03:50:14,368 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-06T03:50:14,368 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T03:50:14,369 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T03:50:14,376 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-06T03:50:14,376 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T03:50:14,377 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T03:50:14,384 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-06T03:50:14,385 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T03:50:14,392 DEBUG 
[master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T03:50:14,394 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T03:50:14,401 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T03:50:14,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:50:14,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T03:50:14,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,410 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=6f1b912b0816,38057,1733457014038, sessionid=0x101aa0c7fcc0000, setting cluster-up flag (Was=false) 2024-12-06T03:50:14,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,451 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T03:50:14,452 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,38057,1733457014038 2024-12-06T03:50:14,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,492 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T03:50:14,493 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6f1b912b0816,38057,1733457014038 2024-12-06T03:50:14,495 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-06T03:50:14,496 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-06T03:50:14,496 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-06T03:50:14,496 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T03:50:14,497 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6f1b912b0816,38057,1733457014038 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T03:50:14,497 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:50:14,497 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:50:14,497 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:50:14,498 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6f1b912b0816:0, corePoolSize=5, maxPoolSize=5 2024-12-06T03:50:14,498 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6f1b912b0816:0, corePoolSize=10, maxPoolSize=10 2024-12-06T03:50:14,498 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,498 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:50:14,498 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6f1b912b0816:0, corePoolSize=1, 
maxPoolSize=1 2024-12-06T03:50:14,499 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:50:14,499 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-06T03:50:14,500 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,500 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T03:50:14,503 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733457044503 2024-12-06T03:50:14,503 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T03:50:14,503 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T03:50:14,503 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T03:50:14,503 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T03:50:14,503 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T03:50:14,503 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T03:50:14,503 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,507 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T03:50:14,507 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T03:50:14,507 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T03:50:14,507 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T03:50:14,507 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T03:50:14,508 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(746): ClusterId : 55b33d39-3ad5-4aa9-afec-09aff61c8e05 2024-12-06T03:50:14,508 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T03:50:14,510 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733457014507,5,FailOnTimeoutGroup] 2024-12-06T03:50:14,511 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733457014510,5,FailOnTimeoutGroup] 2024-12-06T03:50:14,511 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,511 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T03:50:14,511 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,511 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
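
The cleaner entries above (LogsCleaner every 600000 ms, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) are all periodic chores scheduled on HBase's ChoreService. The sketch below illustrates that scheduling model, assuming the ScheduledChore(String, Stoppable, int period) constructor and ChoreService(String) constructor from hbase-common; these are internal HBase classes, and the chore body here is a placeholder, not the real LogsCleaner logic.

// --- illustrative sketch, not part of the test log ---
// A periodic chore in the style of LogsCleaner (period=600000 ms above), scheduled on a ChoreService.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {        // a chore stops rescheduling once its stopper is stopped
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    service.scheduleChore(new ScheduledChore("LogsCleanerLike", stopper, 600_000) {
      @Override protected void chore() {
        // the real LogsCleaner prunes expired WALs under oldWALs; this placeholder just logs a tick
        System.out.println("chore tick");
      }
    });
    Thread.sleep(1_000);                         // give the chore a chance to run
    stopper.stop("done");
    service.shutdown();
  }
}
// --- end sketch ---
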
2024-12-06T03:50:14,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:50:14,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741831_1007 (size=1321) 2024-12-06T03:50:14,516 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-06T03:50:14,516 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8 2024-12-06T03:50:14,518 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T03:50:14,518 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T03:50:14,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:50:14,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741832_1008 (size=32) 2024-12-06T03:50:14,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:50:14,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1588230740 2024-12-06T03:50:14,526 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T03:50:14,527 DEBUG [RS:0;6f1b912b0816:44033 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47ba739a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6f1b912b0816/172.17.0.2:0 2024-12-06T03:50:14,527 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:50:14,527 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:14,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:50:14,528 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:50:14,528 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:14,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:50:14,530 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:50:14,530 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:14,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:50:14,531 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:50:14,531 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:14,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:14,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:50:14,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740 2024-12-06T03:50:14,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740 2024-12-06T03:50:14,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 
2024-12-06T03:50:14,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:50:14,534 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T03:50:14,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:50:14,538 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T03:50:14,538 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=754036, jitterRate=-0.0411946177482605}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:50:14,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733457014525Initializing all the Stores at 1733457014526 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733457014526Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733457014526Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733457014526Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733457014526Cleaning up temporary data from old regions at 1733457014534 (+8 ms)Region opened successfully at 1733457014539 (+5 ms) 2024-12-06T03:50:14,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:50:14,539 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:50:14,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:50:14,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:50:14,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 
2024-12-06T03:50:14,539 DEBUG [RS:0;6f1b912b0816:44033 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6f1b912b0816:44033 2024-12-06T03:50:14,539 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-06T03:50:14,539 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-06T03:50:14,539 DEBUG [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-06T03:50:14,539 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:50:14,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733457014539Disabling compacts and flushes for region at 1733457014539Disabling writes for close at 1733457014539Writing region close event to WAL at 1733457014539Closed at 1733457014539 2024-12-06T03:50:14,540 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(2659): reportForDuty to master=6f1b912b0816,38057,1733457014038 with port=44033, startcode=1733457014178 2024-12-06T03:50:14,540 DEBUG [RS:0;6f1b912b0816:44033 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T03:50:14,540 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:50:14,540 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-06T03:50:14,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T03:50:14,542 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:50:14,543 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39155, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T03:50:14,544 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38057 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 6f1b912b0816,44033,1733457014178 2024-12-06T03:50:14,544 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T03:50:14,544 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38057 {}] master.ServerManager(517): Registering regionserver=6f1b912b0816,44033,1733457014178 2024-12-06T03:50:14,545 DEBUG [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8 2024-12-06T03:50:14,545 DEBUG [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33273 2024-12-06T03:50:14,545 DEBUG [RS:0;6f1b912b0816:44033 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-06T03:50:14,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:50:14,551 DEBUG [RS:0;6f1b912b0816:44033 {}] zookeeper.ZKUtil(111): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6f1b912b0816,44033,1733457014178 2024-12-06T03:50:14,551 WARN [RS:0;6f1b912b0816:44033 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T03:50:14,551 INFO [RS:0;6f1b912b0816:44033 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:50:14,551 DEBUG [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/6f1b912b0816,44033,1733457014178 2024-12-06T03:50:14,552 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6f1b912b0816,44033,1733457014178] 2024-12-06T03:50:14,554 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T03:50:14,555 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T03:50:14,556 INFO [RS:0;6f1b912b0816:44033 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T03:50:14,556 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,556 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-06T03:50:14,557 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-06T03:50:14,557 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
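The ZKWatcher/ZKUtil records above show the region server registering its ephemeral znode under /hbase/rs and the master receiving a NodeChildrenChanged event for that path. A minimal sketch of the same kind of children watch with the plain ZooKeeper client follows; the quorum address is the one printed in the log and changes on every test run.

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class RsZnodeWatch {
    public static void main(String[] args) throws Exception {
        // Quorum address taken from the log; only valid for that particular minicluster run.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54196", 30_000,
                (WatchedEvent e) -> System.out.println("event: " + e.getType() + " " + e.getPath()));

        // Registers a one-shot children watch, the same kind of watch that produces
        // the NodeChildrenChanged event on /hbase/rs seen above.
        List<String> servers = zk.getChildren("/hbase/rs", true);
        servers.forEach(System.out::println);
        zk.close();
    }
}
```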
2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6f1b912b0816:0, corePoolSize=2, maxPoolSize=2 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6f1b912b0816:0, corePoolSize=1, maxPoolSize=1 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:50:14,557 DEBUG [RS:0;6f1b912b0816:44033 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6f1b912b0816:0, corePoolSize=3, maxPoolSize=3 2024-12-06T03:50:14,557 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,557 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,557 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,557 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
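The executor.ExecutorService lines above start one bounded, named pool per handler type (RS_OPEN_REGION core=1/max=1, RS_FLUSH_OPERATIONS core=3/max=3, and so on). HBase wraps its own ExecutorService class around this; the sketch below is only a java.util.concurrent analogy of what such a fixed-size, named pool boils down to.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class NamedPoolSketch {
    // Rough equivalent of a named region-server executor with the core/max sizes logged above.
    static ThreadPoolExecutor newPool(String name, int core, int max) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(core, max, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                r -> new Thread(r, name + "-" + System.nanoTime()));
        pool.allowCoreThreadTimeOut(true);
        return pool;
    }

    public static void main(String[] args) {
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
        openRegion.execute(() -> System.out.println("open region task"));
        openRegion.shutdown();
    }
}
```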
2024-12-06T03:50:14,557 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,557 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44033,1733457014178-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:50:14,569 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T03:50:14,570 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,44033,1733457014178-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,570 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,570 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.Replication(171): 6f1b912b0816,44033,1733457014178 started 2024-12-06T03:50:14,582 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:14,582 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(1482): Serving as 6f1b912b0816,44033,1733457014178, RpcServer on 6f1b912b0816/172.17.0.2:44033, sessionid=0x101aa0c7fcc0001 2024-12-06T03:50:14,582 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T03:50:14,582 DEBUG [RS:0;6f1b912b0816:44033 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6f1b912b0816,44033,1733457014178 2024-12-06T03:50:14,583 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,44033,1733457014178' 2024-12-06T03:50:14,583 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T03:50:14,584 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T03:50:14,584 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T03:50:14,584 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T03:50:14,584 DEBUG [RS:0;6f1b912b0816:44033 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6f1b912b0816,44033,1733457014178 2024-12-06T03:50:14,584 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6f1b912b0816,44033,1733457014178' 2024-12-06T03:50:14,584 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T03:50:14,584 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T03:50:14,584 DEBUG [RS:0;6f1b912b0816:44033 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T03:50:14,584 INFO [RS:0;6f1b912b0816:44033 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T03:50:14,584 INFO [RS:0;6f1b912b0816:44033 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-06T03:50:14,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:14,686 INFO [RS:0;6f1b912b0816:44033 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C44033%2C1733457014178, suffix=, logDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/6f1b912b0816,44033,1733457014178, archiveDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/oldWALs, maxLogs=32 2024-12-06T03:50:14,687 INFO [RS:0;6f1b912b0816:44033 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C44033%2C1733457014178.1733457014686 2024-12-06T03:50:14,692 INFO [RS:0;6f1b912b0816:44033 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/6f1b912b0816,44033,1733457014178/6f1b912b0816%2C44033%2C1733457014178.1733457014686 2024-12-06T03:50:14,694 WARN [6f1b912b0816:38057 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
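The RecoverLeaseFSUtils warning above fails with "Filesystem closed" because the DFSClient behind the old WAL path (port 46387, a previous minicluster) has already been shut down. Stripped of the reflection that RecoverLeaseFSUtils uses, the underlying HDFS calls look roughly like the sketch below; the WAL path is hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical WAL file; the paths in the log belong to an already-stopped minicluster.
        Path wal = new Path("hdfs://localhost:33273/user/jenkins/WALs/example.1733457014686");

        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // recoverLease() asks the NameNode to reclaim the write lease on the file;
            // isFileClosed() is the call that throws above once the DFSClient is closed.
            boolean recovered = dfs.recoverLease(wal);
            while (!recovered && !dfs.isFileClosed(wal)) {
                Thread.sleep(1000);
                recovered = dfs.recoverLease(wal);
            }
        }
        fs.close();
    }
}
```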
2024-12-06T03:50:14,697 DEBUG [RS:0;6f1b912b0816:44033 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38417:38417),(127.0.0.1/127.0.0.1:41147:41147)] 2024-12-06T03:50:14,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:50:14,945 DEBUG [6f1b912b0816:38057 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T03:50:14,945 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6f1b912b0816,44033,1733457014178 2024-12-06T03:50:14,946 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,44033,1733457014178, state=OPENING 2024-12-06T03:50:14,966 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T03:50:14,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:14,976 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:50:14,976 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:50:14,976 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T03:50:14,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,44033,1733457014178}] 2024-12-06T03:50:15,129 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T03:50:15,131 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45793, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T03:50:15,134 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-06T03:50:15,134 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:50:15,136 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6f1b912b0816%2C44033%2C1733457014178.meta, suffix=.meta, logDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/6f1b912b0816,44033,1733457014178, archiveDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/oldWALs, maxLogs=32 2024-12-06T03:50:15,136 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 6f1b912b0816%2C44033%2C1733457014178.meta.1733457015136.meta 2024-12-06T03:50:15,144 INFO 
[RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/6f1b912b0816,44033,1733457014178/6f1b912b0816%2C44033%2C1733457014178.meta.1733457015136.meta 2024-12-06T03:50:15,147 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41147:41147),(127.0.0.1/127.0.0.1:38417:38417)] 2024-12-06T03:50:15,150 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T03:50:15,151 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T03:50:15,151 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T03:50:15,151 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T03:50:15,151 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T03:50:15,151 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T03:50:15,151 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-06T03:50:15,151 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-06T03:50:15,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T03:50:15,153 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T03:50:15,154 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:15,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:15,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-06T03:50:15,154 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-06T03:50:15,154 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:15,155 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:15,155 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T03:50:15,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T03:50:15,155 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:15,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:15,156 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T03:50:15,157 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T03:50:15,157 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T03:50:15,157 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T03:50:15,157 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-06T03:50:15,158 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740 2024-12-06T03:50:15,159 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740 2024-12-06T03:50:15,159 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-06T03:50:15,159 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-06T03:50:15,160 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
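The FlushLargeStoresPolicy line above falls back to memstore flush heap size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta. The arithmetic behind the printed 16.0 M / flushSizeLowerBound=16777216 is spelled out below; the 64 MB flush heap size is back-derived from those values (4 families x 16 MB), not read from the log directly.

```java
public class FlushLowerBoundMath {
    public static void main(String[] args) {
        // hbase:meta has 4 column families: info, ns, rep_barrier, table.
        int families = 4;
        // Assumed: back-derived from the log (4 * 16 MB), since the heap size itself is not printed.
        long memstoreFlushHeapSize = 64L * 1024 * 1024;

        long flushSizeLowerBound = memstoreFlushHeapSize / families;
        System.out.println(flushSizeLowerBound); // 16777216 bytes = 16.0 M, matching the journal
    }
}
```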
2024-12-06T03:50:15,161 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-06T03:50:15,161 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748073, jitterRate=-0.048777177929878235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T03:50:15,161 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-06T03:50:15,162 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733457015151Writing region info on filesystem at 1733457015151Initializing all the Stores at 1733457015152 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733457015152Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733457015153 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733457015153Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733457015153Cleaning up temporary data from old regions at 1733457015159 (+6 ms)Running coprocessor post-open hooks at 1733457015161 (+2 ms)Region opened successfully at 1733457015162 (+1 ms) 2024-12-06T03:50:15,163 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733457015129 2024-12-06T03:50:15,165 DEBUG [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T03:50:15,165 INFO [RS_OPEN_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-06T03:50:15,165 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=6f1b912b0816,44033,1733457014178 2024-12-06T03:50:15,166 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6f1b912b0816,44033,1733457014178, state=OPEN 2024-12-06T03:50:15,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:50:15,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T03:50:15,206 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=6f1b912b0816,44033,1733457014178 2024-12-06T03:50:15,206 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:50:15,206 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T03:50:15,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T03:50:15,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=6f1b912b0816,44033,1733457014178 in 230 msec 2024-12-06T03:50:15,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T03:50:15,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 667 msec 2024-12-06T03:50:15,211 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-06T03:50:15,211 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-06T03:50:15,212 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:50:15,212 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,44033,1733457014178, seqNum=-1] 2024-12-06T03:50:15,212 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:50:15,214 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36355, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:50:15,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 722 msec 2024-12-06T03:50:15,219 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733457015219, completionTime=-1 2024-12-06T03:50:15,219 INFO 
[master/6f1b912b0816:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T03:50:15,219 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-06T03:50:15,221 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-06T03:50:15,221 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733457075221 2024-12-06T03:50:15,221 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733457135221 2024-12-06T03:50:15,221 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-06T03:50:15,222 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,38057,1733457014038-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:15,222 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,38057,1733457014038-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:15,222 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,38057,1733457014038-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:15,222 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6f1b912b0816:38057, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:15,222 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:15,222 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:15,224 DEBUG [master/6f1b912b0816:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-06T03:50:15,226 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.983sec 2024-12-06T03:50:15,226 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T03:50:15,226 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T03:50:15,226 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T03:50:15,226 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
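Once the master logs "Master has completed initialization", a client can observe the state the log describes (one live region server, the registered master) through the public Admin API. A minimal sketch, assuming the client classpath carries the minicluster's configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusPeek {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Reports the active master and live region servers seen in the log
            // (6f1b912b0816,38057,... and 6f1b912b0816,44033,... respectively).
            ClusterMetrics metrics = admin.getClusterMetrics();
            System.out.println("master: " + metrics.getMasterName());
            System.out.println("live servers: " + metrics.getLiveServerMetrics().keySet());
        }
    }
}
```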
2024-12-06T03:50:15,226 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T03:50:15,226 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,38057,1733457014038-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T03:50:15,226 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,38057,1733457014038-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T03:50:15,228 DEBUG [master/6f1b912b0816:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-06T03:50:15,228 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T03:50:15,228 INFO [master/6f1b912b0816:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6f1b912b0816,38057,1733457014038-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T03:50:15,308 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5098cf53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:50:15,308 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 6f1b912b0816,38057,-1 for getting cluster id 2024-12-06T03:50:15,308 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-06T03:50:15,310 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '55b33d39-3ad5-4aa9-afec-09aff61c8e05' 2024-12-06T03:50:15,310 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-06T03:50:15,310 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "55b33d39-3ad5-4aa9-afec-09aff61c8e05" 2024-12-06T03:50:15,310 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@208e3e45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:50:15,310 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [6f1b912b0816,38057,-1] 2024-12-06T03:50:15,311 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-06T03:50:15,311 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:15,312 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54096, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-06T03:50:15,312 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48ba57bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T03:50:15,313 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-06T03:50:15,314 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=6f1b912b0816,44033,1733457014178, seqNum=-1] 2024-12-06T03:50:15,314 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T03:50:15,315 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42724, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T03:50:15,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=6f1b912b0816,38057,1733457014038 2024-12-06T03:50:15,317 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T03:50:15,319 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-06T03:50:15,319 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T03:50:15,321 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/test.com,8080,1, archiveDir=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/oldWALs, maxLogs=32 2024-12-06T03:50:15,321 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733457015321 2024-12-06T03:50:15,326 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/test.com,8080,1/test.com%2C8080%2C1.1733457015321 2024-12-06T03:50:15,326 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41147:41147),(127.0.0.1/127.0.0.1:38417:38417)] 2024-12-06T03:50:15,331 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733457015331 2024-12-06T03:50:15,336 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,336 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,336 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,336 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,336 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,336 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/test.com,8080,1/test.com%2C8080%2C1.1733457015321 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/test.com,8080,1/test.com%2C8080%2C1.1733457015331 2024-12-06T03:50:15,339 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38417:38417),(127.0.0.1/127.0.0.1:41147:41147)] 2024-12-06T03:50:15,339 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/test.com,8080,1/test.com%2C8080%2C1.1733457015321 is not closed yet, will try archiving it next time 2024-12-06T03:50:15,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741835_1011 (size=93) 2024-12-06T03:50:15,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741835_1011 (size=93) 2024-12-06T03:50:15,342 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,343 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,343 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,343 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,343 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,347 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/WALs/test.com,8080,1/test.com%2C8080%2C1.1733457015321 to hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/oldWALs/test.com%2C8080%2C1.1733457015321 2024-12-06T03:50:15,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741836_1012 (size=93) 2024-12-06T03:50:15,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741836_1012 (size=93) 2024-12-06T03:50:15,350 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/oldWALs 2024-12-06T03:50:15,350 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733457015331) 2024-12-06T03:50:15,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-06T03:50:15,350 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
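The AbstractFSWAL records above report the FSHLog settings (blocksize=256 MB, rollsize=128 MB, maxLogs=32) and then roll and archive a WAL with zero entries. The sketch below reads the configuration keys that commonly control those values; the property names and the 0.5 roll multiplier are assumptions based on standard HBase configuration, not taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed standard property names; the log reports the resolved values
        // blocksize=256 MB, rollsize=128 MB, maxLogs=32 for the FSHLog provider.
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5F);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);

        long rollSize = (long) (blockSize * multiplier);
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", blockSize, rollSize, maxLogs);
    }
}
```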
2024-12-06T03:50:15,350 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:50:15,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:15,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:15,350 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
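The call stack above shows the teardown path: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the async connection and triggers the shutdown sequence that follows. A minimal JUnit 4 lifecycle sketch of that pattern is below; startMiniCluster() as the counterpart of the logged shutdownMiniCluster() call is an assumption, and real HBase tests also attach a class test rule that is omitted here.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
    // HBaseTestingUtil is the class named in the stack trace above.
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        // Assumed counterpart of the shutdownMiniCluster() call visible in the stack trace.
        util.startMiniCluster();
    }

    @After
    public void tearDown() throws Exception {
        // This is the call that produces the "Shutting down minicluster" sequence in the log.
        util.shutdownMiniCluster();
    }

    @Test
    public void clusterIsUp() throws Exception {
        // Placeholder body; a real test such as AbstractTestLogRolling exercises WAL rolling here.
    }
}
```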
2024-12-06T03:50:15,350 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T03:50:15,350 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=236212020, stopped=false 2024-12-06T03:50:15,351 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=6f1b912b0816,38057,1733457014038 2024-12-06T03:50:15,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:50:15,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:15,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T03:50:15,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:15,359 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:50:15,359 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-06T03:50:15,359 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:50:15,359 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:15,360 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:50:15,360 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '6f1b912b0816,44033,1733457014178' ***** 2024-12-06T03:50:15,360 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-06T03:50:15,360 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T03:50:15,360 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T03:50:15,360 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-06T03:50:15,360 INFO [RS:0;6f1b912b0816:44033 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T03:50:15,360 INFO [RS:0;6f1b912b0816:44033 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T03:50:15,360 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(959): stopping server 6f1b912b0816,44033,1733457014178 2024-12-06T03:50:15,360 INFO [RS:0;6f1b912b0816:44033 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:50:15,360 INFO [RS:0;6f1b912b0816:44033 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;6f1b912b0816:44033. 
2024-12-06T03:50:15,360 DEBUG [RS:0;6f1b912b0816:44033 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-06T03:50:15,360 DEBUG [RS:0;6f1b912b0816:44033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:15,361 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T03:50:15,361 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T03:50:15,361 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-06T03:50:15,361 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-06T03:50:15,361 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-06T03:50:15,361 DEBUG [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-06T03:50:15,361 DEBUG [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-06T03:50:15,361 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-06T03:50:15,361 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-06T03:50:15,361 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-06T03:50:15,361 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T03:50:15,361 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T03:50:15,361 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-06T03:50:15,375 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740/.tmp/ns/cf30df78d422481abe79fbce44e37365 is 43, key is default/ns:d/1733457015214/Put/seqid=0 2024-12-06T03:50:15,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741837_1013 (size=5153) 2024-12-06T03:50:15,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741837_1013 (size=5153) 2024-12-06T03:50:15,383 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740/.tmp/ns/cf30df78d422481abe79fbce44e37365 2024-12-06T03:50:15,389 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740/.tmp/ns/cf30df78d422481abe79fbce44e37365 as hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740/ns/cf30df78d422481abe79fbce44e37365 2024-12-06T03:50:15,394 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740/ns/cf30df78d422481abe79fbce44e37365, entries=2, sequenceid=6, filesize=5.0 K 2024-12-06T03:50:15,395 INFO 
[RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false 2024-12-06T03:50:15,395 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T03:50:15,400 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T03:50:15,400 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T03:50:15,400 INFO [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-06T03:50:15,400 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733457015361Running coprocessor pre-close hooks at 1733457015361Disabling compacts and flushes for region at 1733457015361Disabling writes for close at 1733457015361Obtaining lock to block concurrent updates at 1733457015361Preparing flush snapshotting stores in 1588230740 at 1733457015361Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733457015361Flushing stores of hbase:meta,,1.1588230740 at 1733457015362 (+1 ms)Flushing 1588230740/ns: creating writer at 1733457015362Flushing 1588230740/ns: appending metadata at 1733457015374 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733457015375 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15670f67: reopening flushed file at 1733457015388 (+13 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false at 1733457015395 (+7 ms)Writing region close event to WAL at 1733457015396 (+1 ms)Running coprocessor post-close hooks at 1733457015400 (+4 ms)Closed at 1733457015400 2024-12-06T03:50:15,401 DEBUG [RS_CLOSE_META-regionserver/6f1b912b0816:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T03:50:15,561 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(976): stopping server 6f1b912b0816,44033,1733457014178; all regions closed. 
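The meta flush recorded above follows a two-step commit: the store flusher first writes the new HFile under the region's .tmp directory, then commits it by moving it into the column-family directory (the "Committing .tmp/ns/... as .../ns/..." DEBUG line from HRegionFileSystem). A rough sketch of that commit-by-rename step is below, assuming a plain FileSystem rename; the real logic in HRegionFileSystem carries additional validation, and the helper names here are illustrative.

```java
// Rough sketch of the commit-by-rename step behind the "Committing .tmp/... as ..." DEBUG
// entries above. Paths and error handling are simplified and the class/method names are
// illustrative; HRegionFileSystem (the class logging that line) does the real work.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class FlushCommitSketch {
  /** Moves a flushed HFile from the region's .tmp area into the column-family directory. */
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    // The rename is what makes the flushed file visible under ns/ with its final name.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}
```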
2024-12-06T03:50:15,562 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,562 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,562 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,562 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,562 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741834_1010 (size=1152) 2024-12-06T03:50:15,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741834_1010 (size=1152) 2024-12-06T03:50:15,568 DEBUG [RS:0;6f1b912b0816:44033 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/oldWALs 2024-12-06T03:50:15,569 INFO [RS:0;6f1b912b0816:44033 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C44033%2C1733457014178.meta:.meta(num 1733457015136) 2024-12-06T03:50:15,569 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,569 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,569 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,569 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,569 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:15,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741833_1009 (size=93) 2024-12-06T03:50:15,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741833_1009 (size=93) 2024-12-06T03:50:15,602 INFO [regionserver/6f1b912b0816:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T03:50:15,603 INFO [regionserver/6f1b912b0816:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T03:50:15,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37887,1733456817153/6f1b912b0816%2C37887%2C1733456817153.1733456817388 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T03:50:15,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46387/user/jenkins/test-data/5414046d-1734-a014-b8cb-26952fade726/WALs/6f1b912b0816,37979,1733456815684/6f1b912b0816%2C37979%2C1733456815684.meta.1733456816885.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T03:50:15,974 DEBUG [RS:0;6f1b912b0816:44033 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/oldWALs 2024-12-06T03:50:15,974 INFO [RS:0;6f1b912b0816:44033 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 6f1b912b0816%2C44033%2C1733457014178:(num 1733457014686) 2024-12-06T03:50:15,974 DEBUG [RS:0;6f1b912b0816:44033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T03:50:15,974 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T03:50:15,974 INFO [RS:0;6f1b912b0816:44033 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:50:15,974 INFO [RS:0;6f1b912b0816:44033 {}] hbase.ChoreService(370): Chore service for: regionserver/6f1b912b0816:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-06T03:50:15,974 INFO [RS:0;6f1b912b0816:44033 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:50:15,974 INFO [RS:0;6f1b912b0816:44033 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44033 2024-12-06T03:50:15,975 INFO [regionserver/6f1b912b0816:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:50:16,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T03:50:16,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6f1b912b0816,44033,1733457014178 2024-12-06T03:50:16,017 INFO [RS:0;6f1b912b0816:44033 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:50:16,018 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6f1b912b0816,44033,1733457014178] 2024-12-06T03:50:16,034 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/6f1b912b0816,44033,1733457014178 already deleted, retry=false 2024-12-06T03:50:16,034 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 6f1b912b0816,44033,1733457014178 expired; onlineServers=0 2024-12-06T03:50:16,034 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '6f1b912b0816,38057,1733457014038' ***** 2024-12-06T03:50:16,034 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T03:50:16,034 INFO [M:0;6f1b912b0816:38057 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-06T03:50:16,034 INFO [M:0;6f1b912b0816:38057 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-06T03:50:16,034 DEBUG [M:0;6f1b912b0816:38057 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T03:50:16,034 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
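The two Close-WAL-Writer-0 WARN entries above come from RecoverLeaseFSUtils probing whether a WAL file is already closed; the probe is made reflectively (note the GeneratedMethodAccessor frame) and here fails with "java.io.IOException: Filesystem closed" because that DFS client had already been shut down by an earlier test. A hedged sketch of such a reflective isFileClosed probe is below; only the method name and the reflective call pattern are taken from the trace, the rest is illustrative.

```java
// Hedged sketch of a reflective isFileClosed(Path) probe like the one the
// RecoverLeaseFSUtils WARN entries above record. Only the method name and the
// reflection-based call come from the stack trace; the names here are illustrative.
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /** Returns true if the filesystem reports the file closed; false if the probe is missing or fails. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); the generic FileSystem API does not,
      // hence a reflective lookup rather than a compile-time dependency.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (ReflectiveOperationException e) {
      // An InvocationTargetException wrapping "java.io.IOException: Filesystem closed"
      // is what surfaces above once the mini cluster's DFS client has already been closed.
      return false;
    }
  }
}
```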
2024-12-06T03:50:16,034 DEBUG [M:0;6f1b912b0816:38057 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T03:50:16,034 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733457014510 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.small.0-1733457014510,5,FailOnTimeoutGroup] 2024-12-06T03:50:16,034 DEBUG [master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733457014507 {}] cleaner.HFileCleaner(306): Exit Thread[master/6f1b912b0816:0:becomeActiveMaster-HFileCleaner.large.0-1733457014507,5,FailOnTimeoutGroup] 2024-12-06T03:50:16,034 INFO [M:0;6f1b912b0816:38057 {}] hbase.ChoreService(370): Chore service for: master/6f1b912b0816:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-06T03:50:16,034 INFO [M:0;6f1b912b0816:38057 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-06T03:50:16,034 DEBUG [M:0;6f1b912b0816:38057 {}] master.HMaster(1795): Stopping service threads 2024-12-06T03:50:16,034 INFO [M:0;6f1b912b0816:38057 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T03:50:16,035 INFO [M:0;6f1b912b0816:38057 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-06T03:50:16,035 INFO [M:0;6f1b912b0816:38057 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T03:50:16,035 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T03:50:16,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T03:50:16,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T03:50:16,042 DEBUG [M:0;6f1b912b0816:38057 {}] zookeeper.ZKUtil(347): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T03:50:16,042 WARN [M:0;6f1b912b0816:38057 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T03:50:16,043 INFO [M:0;6f1b912b0816:38057 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/.lastflushedseqids 2024-12-06T03:50:16,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741838_1014 (size=99) 2024-12-06T03:50:16,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741838_1014 (size=99) 2024-12-06T03:50:16,049 INFO [M:0;6f1b912b0816:38057 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-06T03:50:16,049 INFO [M:0;6f1b912b0816:38057 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T03:50:16,050 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T03:50:16,050 INFO [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:16,050 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:16,050 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T03:50:16,050 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T03:50:16,050 INFO [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-06T03:50:16,070 DEBUG [M:0;6f1b912b0816:38057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/182735cd6cd84bba9487b92dd4af4a82 is 82, key is hbase:meta,,1/info:regioninfo/1733457015165/Put/seqid=0 2024-12-06T03:50:16,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741839_1015 (size=5672) 2024-12-06T03:50:16,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741839_1015 (size=5672) 2024-12-06T03:50:16,075 INFO [M:0;6f1b912b0816:38057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/182735cd6cd84bba9487b92dd4af4a82 2024-12-06T03:50:16,093 DEBUG [M:0;6f1b912b0816:38057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9056511c021408c808c411dced2dab8 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733457015218/Put/seqid=0 2024-12-06T03:50:16,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741840_1016 (size=5275) 2024-12-06T03:50:16,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741840_1016 (size=5275) 2024-12-06T03:50:16,103 INFO [M:0;6f1b912b0816:38057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9056511c021408c808c411dced2dab8 2024-12-06T03:50:16,126 DEBUG [M:0;6f1b912b0816:38057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3ea5dd24a9f459390e24e50cbf0fb52 is 69, key is 6f1b912b0816,44033,1733457014178/rs:state/1733457014544/Put/seqid=0 2024-12-06T03:50:16,126 INFO [RS:0;6f1b912b0816:44033 {}] hbase.HBaseServerBase(486): 
Close table descriptors 2024-12-06T03:50:16,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:50:16,126 INFO [RS:0;6f1b912b0816:44033 {}] regionserver.HRegionServer(1031): Exiting; stopping=6f1b912b0816,44033,1733457014178; zookeeper connection closed. 2024-12-06T03:50:16,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44033-0x101aa0c7fcc0001, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:50:16,126 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2248a869 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2248a869 2024-12-06T03:50:16,126 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T03:50:16,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741841_1017 (size=5156) 2024-12-06T03:50:16,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741841_1017 (size=5156) 2024-12-06T03:50:16,130 INFO [M:0;6f1b912b0816:38057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3ea5dd24a9f459390e24e50cbf0fb52 2024-12-06T03:50:16,147 DEBUG [M:0;6f1b912b0816:38057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db0b24941dc7485a8d6f1173b5f5ab9a is 52, key is load_balancer_on/state:d/1733457015318/Put/seqid=0 2024-12-06T03:50:16,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741842_1018 (size=5056) 2024-12-06T03:50:16,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741842_1018 (size=5056) 2024-12-06T03:50:16,160 INFO [M:0;6f1b912b0816:38057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db0b24941dc7485a8d6f1173b5f5ab9a 2024-12-06T03:50:16,165 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/182735cd6cd84bba9487b92dd4af4a82 as hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/182735cd6cd84bba9487b92dd4af4a82 2024-12-06T03:50:16,169 INFO [M:0;6f1b912b0816:38057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/182735cd6cd84bba9487b92dd4af4a82, entries=8, sequenceid=29, filesize=5.5 K 2024-12-06T03:50:16,170 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9056511c021408c808c411dced2dab8 as hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c9056511c021408c808c411dced2dab8 2024-12-06T03:50:16,174 INFO [M:0;6f1b912b0816:38057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c9056511c021408c808c411dced2dab8, entries=3, sequenceid=29, filesize=5.2 K 2024-12-06T03:50:16,175 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3ea5dd24a9f459390e24e50cbf0fb52 as hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3ea5dd24a9f459390e24e50cbf0fb52 2024-12-06T03:50:16,179 INFO [M:0;6f1b912b0816:38057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3ea5dd24a9f459390e24e50cbf0fb52, entries=1, sequenceid=29, filesize=5.0 K 2024-12-06T03:50:16,180 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db0b24941dc7485a8d6f1173b5f5ab9a as hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/db0b24941dc7485a8d6f1173b5f5ab9a 2024-12-06T03:50:16,184 INFO [M:0;6f1b912b0816:38057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33273/user/jenkins/test-data/c2a28c12-6da5-51d7-2208-661cc4cb36a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/db0b24941dc7485a8d6f1173b5f5ab9a, entries=1, sequenceid=29, filesize=4.9 K 2024-12-06T03:50:16,185 INFO [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=29, compaction requested=false 2024-12-06T03:50:16,187 INFO [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T03:50:16,187 DEBUG [M:0;6f1b912b0816:38057 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733457016050Disabling compacts and flushes for region at 1733457016050Disabling writes for close at 1733457016050Obtaining lock to block concurrent updates at 1733457016050Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733457016050Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733457016050Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733457016051 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733457016051Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733457016069 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733457016069Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733457016079 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733457016092 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733457016093 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733457016107 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733457016125 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733457016125Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733457016134 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733457016147 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733457016147Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34f26d7c: reopening flushed file at 1733457016164 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c6a1c8c: reopening flushed file at 1733457016169 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e8b1b2: reopening flushed file at 1733457016174 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c56d72a: reopening flushed file at 1733457016179 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=29, compaction requested=false at 1733457016185 (+6 ms)Writing region close event to WAL at 1733457016186 (+1 ms)Closed at 1733457016186 2024-12-06T03:50:16,187 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:16,187 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:16,187 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:16,187 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:16,187 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-06T03:50:16,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44119 is added to blk_1073741830_1006 (size=10311) 2024-12-06T03:50:16,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46079 is added to blk_1073741830_1006 (size=10311) 2024-12-06T03:50:16,190 INFO [M:0;6f1b912b0816:38057 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-06T03:50:16,190 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-06T03:50:16,190 INFO [M:0;6f1b912b0816:38057 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38057 2024-12-06T03:50:16,190 INFO [M:0;6f1b912b0816:38057 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-06T03:50:16,309 INFO [M:0;6f1b912b0816:38057 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-06T03:50:16,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:50:16,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38057-0x101aa0c7fcc0000, quorum=127.0.0.1:54196, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T03:50:16,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1e8a55ee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:50:16,311 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c21148f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:50:16,311 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:50:16,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45f7f9ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:50:16,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11777c89{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/hadoop.log.dir/,STOPPED} 2024-12-06T03:50:16,313 WARN [BP-11654519-172.17.0.2-1733457012354 heartbeating to localhost/127.0.0.1:33273 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:50:16,313 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:50:16,313 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:50:16,313 WARN [BP-11654519-172.17.0.2-1733457012354 heartbeating to localhost/127.0.0.1:33273 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-11654519-172.17.0.2-1733457012354 (Datanode Uuid c7613104-e3d2-4b14-99d0-6b4bba1b2963) service to localhost/127.0.0.1:33273 2024-12-06T03:50:16,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/data/data3/current/BP-11654519-172.17.0.2-1733457012354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:50:16,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/data/data4/current/BP-11654519-172.17.0.2-1733457012354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:50:16,314 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:50:16,315 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ac97d9e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T03:50:16,316 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f50471b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:50:16,316 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:50:16,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@646f6454{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:50:16,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30560100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/hadoop.log.dir/,STOPPED} 2024-12-06T03:50:16,317 WARN [BP-11654519-172.17.0.2-1733457012354 heartbeating to localhost/127.0.0.1:33273 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T03:50:16,317 WARN [BP-11654519-172.17.0.2-1733457012354 heartbeating to localhost/127.0.0.1:33273 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-11654519-172.17.0.2-1733457012354 (Datanode Uuid b5d3a403-b858-4dcd-be6f-0c702cf92f22) service to localhost/127.0.0.1:33273 2024-12-06T03:50:16,317 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T03:50:16,317 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T03:50:16,317 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/data/data1/current/BP-11654519-172.17.0.2-1733457012354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:50:16,317 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/cluster_43aa53cc-d751-ea40-5336-57e2738443b9/data/data2/current/BP-11654519-172.17.0.2-1733457012354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T03:50:16,317 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T03:50:16,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49e8762f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T03:50:16,326 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c6fbb1f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T03:50:16,326 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T03:50:16,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@576e761c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T03:50:16,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@243c28a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/464020e9-08f4-5a18-a07a-a0b0d8b90f0b/hadoop.log.dir/,STOPPED} 2024-12-06T03:50:16,331 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-06T03:50:16,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-06T03:50:16,360 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 229) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33273 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33273 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33273 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33273 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:33273
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33273 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:33273
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33273
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=535 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=155 (was 159), ProcessCount=11 (was 11), AvailableMemoryMB=6944 (was 6956)