2024-11-18 02:28:22,438 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-18 02:28:22,456 main DEBUG Took 0.015604 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-18 02:28:22,456 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-18 02:28:22,457 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-18 02:28:22,458 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-18 02:28:22,460 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,469 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-18 02:28:22,484 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,486 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,487 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,487 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,487 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,488 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,489 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,489 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,490 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,490 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,491 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,492 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,492 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,493 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-18 02:28:22,494 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,495 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,496 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,496 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,497 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,497 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,498 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,498 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 02:28:22,499 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,499 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-18 02:28:22,501 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 02:28:22,503 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-18 02:28:22,505 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-18 02:28:22,506 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
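The entries above show the per-package levels the test harness pulls from its properties-based Log4j configuration: ERROR for org.apache.zookeeper and most metrics2 classes, WARN for org.apache.hadoop and org.apache.directory, DEBUG for org.apache.hadoop.hbase, and an INFO root logger routed to the Console appender. As an illustration only (the real configuration is the log4j2.properties file named later in this log), the same levels could be applied programmatically with Log4j 2's Configurator:

// Illustrative only: the harness loads these levels from log4j2.properties
// (the PropertiesConfiguration entries above); this sketch just applies the
// same per-package levels through Log4j 2's programmatic Configurator API.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class TestLogLevels {
    public static void apply() {
        Configurator.setRootLevel(Level.INFO);                                   // root: INFO,Console
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.directory", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsSystemImpl", Level.ERROR);
        Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
    }
}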
2024-11-18 02:28:22,507 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-18 02:28:22,508 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-18 02:28:22,520 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-18 02:28:22,524 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-18 02:28:22,526 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-18 02:28:22,527 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-18 02:28:22,527 main DEBUG createAppenders(={Console}) 2024-11-18 02:28:22,528 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-18 02:28:22,529 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-18 02:28:22,529 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-18 02:28:22,530 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-18 02:28:22,530 main DEBUG OutputStream closed 2024-11-18 02:28:22,531 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-18 02:28:22,531 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-18 02:28:22,531 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-18 02:28:22,624 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-18 02:28:22,627 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-18 02:28:22,629 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-18 02:28:22,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-18 02:28:22,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-18 02:28:22,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-18 02:28:22,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-18 02:28:22,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-18 02:28:22,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-18 02:28:22,634 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-18 02:28:22,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-18 02:28:22,636 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-18 02:28:22,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-18 02:28:22,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-18 02:28:22,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-18 02:28:22,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-18 02:28:22,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-18 02:28:22,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-18 02:28:22,642 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-18 02:28:22,642 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-18 02:28:22,642 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-18 02:28:22,643 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-18T02:28:22,993 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8 2024-11-18 02:28:22,997 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-18 02:28:22,998 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
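Every application line that follows is rendered by the pattern configured above for the HBaseTestAppender, %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n. The sketch below is not taken from the test code; it only rebuilds that layout to spell out what each conversion specifier contributes to the lines in this log:

// A minimal sketch (not from the test sources) that builds the same layout
// the HBaseTestAppender was configured with, documenting the line format:
//   %d{ISO8601}  timestamp       e.g. 2024-11-18T02:28:23,049
//   %-5p         level           e.g. INFO
//   %t / %X      thread and MDC  e.g. [Time-limited test {}]
//   %C{2}(%L)    class(line)     e.g. hbase.HBaseTestingUtil(805)
//   %m%n         message plus newline
import org.apache.logging.log4j.core.layout.PatternLayout;

public class ConsolePattern {
    static final PatternLayout LAYOUT = PatternLayout.newBuilder()
        .withPattern("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")
        .build();
}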
2024-11-18T02:28:23,008 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-18T02:28:23,049 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=228, ProcessCount=11, AvailableMemoryMB=3670 2024-11-18T02:28:23,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T02:28:23,068 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76, deleteOnExit=true 2024-11-18T02:28:23,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T02:28:23,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/test.cache.data in system properties and HBase conf 2024-11-18T02:28:23,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T02:28:23,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.log.dir in system properties and HBase conf 2024-11-18T02:28:23,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T02:28:23,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T02:28:23,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T02:28:23,173 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-18T02:28:23,320 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T02:28:23,325 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:28:23,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:28:23,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T02:28:23,327 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:28:23,328 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T02:28:23,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T02:28:23,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:28:23,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:28:23,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T02:28:23,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/nfs.dump.dir in system properties and HBase conf 2024-11-18T02:28:23,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/java.io.tmpdir in system properties and HBase conf 2024-11-18T02:28:23,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:28:23,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T02:28:23,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T02:28:23,995 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:28:24,366 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-18T02:28:24,471 INFO [Time-limited test {}] log.Log(170): Logging initialized @2926ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-18T02:28:24,560 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:28:24,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:28:24,654 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:28:24,654 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:28:24,656 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:28:24,673 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:28:24,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:28:24,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:28:24,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/java.io.tmpdir/jetty-localhost-37269-hadoop-hdfs-3_4_1-tests_jar-_-any-5773208022187636644/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:28:24,917 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:37269} 2024-11-18T02:28:24,918 INFO [Time-limited test {}] server.Server(415): Started @3374ms 2024-11-18T02:28:24,946 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:28:25,342 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:28:25,351 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:28:25,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:28:25,353 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:28:25,354 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:28:25,357 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:28:25,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:28:25,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c2fdbac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/java.io.tmpdir/jetty-localhost-36065-hadoop-hdfs-3_4_1-tests_jar-_-any-15873527190149156505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:28:25,488 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:36065} 2024-11-18T02:28:25,489 INFO [Time-limited test {}] server.Server(415): Started @3945ms 2024-11-18T02:28:25,545 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:28:25,709 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:28:25,714 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:28:25,716 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:28:25,717 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:28:25,717 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:28:25,719 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:28:25,720 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:28:25,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1467625d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/java.io.tmpdir/jetty-localhost-42357-hadoop-hdfs-3_4_1-tests_jar-_-any-13129804008596078046/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:28:25,881 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:42357} 2024-11-18T02:28:25,882 INFO [Time-limited test {}] server.Server(415): Started @4338ms 2024-11-18T02:28:25,887 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
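At 02:28:23,052 the harness reported StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, and the Jetty/HDFS startup above is the MiniDFS side of that topology. A minimal sketch of how a test requests this layout is shown below; the builder method names are inferred from the option's toString in this log rather than copied from TestLogRolling, so treat them as assumptions:

// Sketch of how a test would request the topology reported above
// (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1).
// Builder method names are inferred from StartMiniClusterOption's toString
// in this log, not lifted from TestLogRolling itself.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSetup {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // brings up MiniDFS, MiniZooKeeper, master and regionserver
        try {
            // ... test body ...
        } finally {
            util.shutdownMiniCluster();
        }
    }
}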
2024-11-18T02:28:26,128 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/data/data3/current/BP-1085259333-172.17.0.2-1731896904097/current, will proceed with Du for space computation calculation, 2024-11-18T02:28:26,138 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/data/data4/current/BP-1085259333-172.17.0.2-1731896904097/current, will proceed with Du for space computation calculation, 2024-11-18T02:28:26,158 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/data/data1/current/BP-1085259333-172.17.0.2-1731896904097/current, will proceed with Du for space computation calculation, 2024-11-18T02:28:26,164 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/data/data2/current/BP-1085259333-172.17.0.2-1731896904097/current, will proceed with Du for space computation calculation, 2024-11-18T02:28:26,253 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:28:26,253 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:28:26,351 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd8f2125ddb80296 with lease ID 0xe41a4efa8f73ecbd: Processing first storage report for DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b from datanode DatanodeRegistration(127.0.0.1:46563, datanodeUuid=d0d61583-8fe2-420c-bddd-55f1f47f3ea9, infoPort=37667, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=2000708287;c=1731896904097) 2024-11-18T02:28:26,353 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd8f2125ddb80296 with lease ID 0xe41a4efa8f73ecbd: from storage DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b node DatanodeRegistration(127.0.0.1:46563, datanodeUuid=d0d61583-8fe2-420c-bddd-55f1f47f3ea9, infoPort=37667, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=2000708287;c=1731896904097), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T02:28:26,353 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6c5c3845211ad689 with lease ID 0xe41a4efa8f73ecbe: Processing first storage report for DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce from datanode DatanodeRegistration(127.0.0.1:33973, datanodeUuid=15fc9655-10b9-43cd-a1fc-f70e1b50c326, infoPort=37655, infoSecurePort=0, ipcPort=44053, storageInfo=lv=-57;cid=testClusterID;nsid=2000708287;c=1731896904097) 2024-11-18T02:28:26,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c5c3845211ad689 with lease ID 0xe41a4efa8f73ecbe: from storage DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce node DatanodeRegistration(127.0.0.1:33973, datanodeUuid=15fc9655-10b9-43cd-a1fc-f70e1b50c326, infoPort=37655, infoSecurePort=0, ipcPort=44053, storageInfo=lv=-57;cid=testClusterID;nsid=2000708287;c=1731896904097), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:28:26,354 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd8f2125ddb80296 with lease ID 0xe41a4efa8f73ecbd: Processing first storage report for DS-e4b6675b-6d82-4efa-b6c5-1741fefdbb4d from datanode DatanodeRegistration(127.0.0.1:46563, datanodeUuid=d0d61583-8fe2-420c-bddd-55f1f47f3ea9, infoPort=37667, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=2000708287;c=1731896904097) 2024-11-18T02:28:26,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd8f2125ddb80296 with lease ID 0xe41a4efa8f73ecbd: from storage DS-e4b6675b-6d82-4efa-b6c5-1741fefdbb4d node DatanodeRegistration(127.0.0.1:46563, datanodeUuid=d0d61583-8fe2-420c-bddd-55f1f47f3ea9, infoPort=37667, infoSecurePort=0, ipcPort=41139, storageInfo=lv=-57;cid=testClusterID;nsid=2000708287;c=1731896904097), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:28:26,354 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6c5c3845211ad689 with lease ID 0xe41a4efa8f73ecbe: Processing first storage report for DS-da280c2f-b1ed-4565-a000-e393790f7749 from datanode DatanodeRegistration(127.0.0.1:33973, datanodeUuid=15fc9655-10b9-43cd-a1fc-f70e1b50c326, infoPort=37655, infoSecurePort=0, ipcPort=44053, storageInfo=lv=-57;cid=testClusterID;nsid=2000708287;c=1731896904097) 2024-11-18T02:28:26,355 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x6c5c3845211ad689 with lease ID 0xe41a4efa8f73ecbe: from storage DS-da280c2f-b1ed-4565-a000-e393790f7749 node DatanodeRegistration(127.0.0.1:33973, datanodeUuid=15fc9655-10b9-43cd-a1fc-f70e1b50c326, infoPort=37655, infoSecurePort=0, ipcPort=44053, storageInfo=lv=-57;cid=testClusterID;nsid=2000708287;c=1731896904097), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T02:28:26,384 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8 2024-11-18T02:28:26,480 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/zookeeper_0, clientPort=64207, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T02:28:26,502 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64207 2024-11-18T02:28:26,513 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:28:26,517 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:28:26,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:28:26,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:28:27,224 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4 with version=8 2024-11-18T02:28:27,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase-staging 2024-11-18T02:28:27,316 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-18T02:28:27,564 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:28:27,575 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:28:27,575 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:28:27,579 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:28:27,579 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:28:27,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:28:27,713 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T02:28:27,773 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-18T02:28:27,781 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-18T02:28:27,785 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:28:27,811 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20337 (auto-detected) 2024-11-18T02:28:27,812 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-18T02:28:27,830 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32911 2024-11-18T02:28:27,851 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32911 connecting to ZooKeeper ensemble=127.0.0.1:64207 2024-11-18T02:28:27,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329110x0, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:28:27,887 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32911-0x10128e77d600000 connected 2024-11-18T02:28:27,928 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:28:27,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:28:27,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:28:27,945 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4, hbase.cluster.distributed=false 2024-11-18T02:28:27,968 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:28:27,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32911 2024-11-18T02:28:27,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32911 2024-11-18T02:28:27,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32911 2024-11-18T02:28:27,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32911 2024-11-18T02:28:27,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32911 2024-11-18T02:28:28,103 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:28:28,105 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:28:28,105 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:28:28,105 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:28:28,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:28:28,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:28:28,109 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T02:28:28,112 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:28:28,113 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44819 2024-11-18T02:28:28,115 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44819 connecting to ZooKeeper ensemble=127.0.0.1:64207 2024-11-18T02:28:28,117 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:28:28,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:28:28,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448190x0, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:28:28,134 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44819-0x10128e77d600001 connected 2024-11-18T02:28:28,134 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:28:28,138 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T02:28:28,147 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T02:28:28,149 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T02:28:28,155 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:28:28,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44819 2024-11-18T02:28:28,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44819 2024-11-18T02:28:28,157 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44819 2024-11-18T02:28:28,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44819 2024-11-18T02:28:28,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44819 2024-11-18T02:28:28,174 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c4730a2bacf8:32911 2024-11-18T02:28:28,174 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:28,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:28:28,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:28:28,183 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:28,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T02:28:28,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-18T02:28:28,205 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T02:28:28,207 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c4730a2bacf8,32911,1731896907368 from backup master directory 2024-11-18T02:28:28,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:28,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:28:28,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:28:28,211 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:28:28,211 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:28,213 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-18T02:28:28,214 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-18T02:28:28,274 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase.id] with ID: 003db97e-f57e-497e-8a52-9820c58ee093 2024-11-18T02:28:28,274 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/.tmp/hbase.id 2024-11-18T02:28:28,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:28:28,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:28:28,287 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/.tmp/hbase.id]:[hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase.id] 2024-11-18T02:28:28,331 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:28:28,337 INFO 
[master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T02:28:28,356 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-18T02:28:28,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:28:28,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:28:28,394 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:28:28,397 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T02:28:28,405 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:28:28,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:28:28,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:28:28,466 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store 2024-11-18T02:28:28,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:28:28,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:28:28,492 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-18T02:28:28,495 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:28:28,497 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:28:28,497 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:28:28,497 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:28:28,499 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:28:28,499 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
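The 'master:store' descriptor printed above defines four column families (info, proc, rs, state) with the attributes shown. Purely as a reading aid, the sketch below rebuilds the 'info' family with HBase's public descriptor builders using the values from the log; it is not how MasterRegion constructs the region internally:

// Rebuilds the 'info' family of the 'master:store' descriptor logged above.
// Values (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, IN_MEMORY, 8 KB
// blocks) come from the log; 'proc', 'rs' and 'state' are elided.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
    }
}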
2024-11-18T02:28:28,499 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:28:28,501 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731896908496Disabling compacts and flushes for region at 1731896908496Disabling writes for close at 1731896908499 (+3 ms)Writing region close event to WAL at 1731896908499Closed at 1731896908499 2024-11-18T02:28:28,503 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/.initializing 2024-11-18T02:28:28,503 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/WALs/c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:28,529 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C32911%2C1731896907368, suffix=, logDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/WALs/c4730a2bacf8,32911,1731896907368, archiveDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/oldWALs, maxLogs=10 2024-11-18T02:28:28,538 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C32911%2C1731896907368.1731896908533 2024-11-18T02:28:28,562 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/WALs/c4730a2bacf8,32911,1731896907368/c4730a2bacf8%2C32911%2C1731896907368.1731896908533 2024-11-18T02:28:28,574 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37655:37655),(127.0.0.1/127.0.0.1:37667:37667)] 2024-11-18T02:28:28,575 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:28:28,576 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:28:28,579 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,580 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T02:28:28,648 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:28,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:28:28,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,656 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T02:28:28,657 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:28,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:28:28,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,660 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T02:28:28,661 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:28,662 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:28:28,662 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,664 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T02:28:28,664 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:28,665 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:28:28,666 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,671 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,672 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,676 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,677 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,680 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T02:28:28,683 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:28:28,688 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:28:28,689 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853070, jitterRate=0.08473502099514008}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T02:28:28,698 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731896908593Initializing all the Stores at 1731896908595 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731896908595Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731896908596 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731896908596Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731896908597 (+1 ms)Cleaning up temporary data from old regions at 1731896908677 (+80 ms)Region opened successfully at 1731896908698 (+21 ms) 2024-11-18T02:28:28,699 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T02:28:28,737 DEBUG 
[master/c4730a2bacf8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cb1742a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:28:28,770 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T02:28:28,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T02:28:28,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T02:28:28,786 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T02:28:28,787 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-18T02:28:28,792 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-18T02:28:28,792 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T02:28:28,818 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T02:28:28,827 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T02:28:28,829 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T02:28:28,831 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T02:28:28,833 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T02:28:28,837 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T02:28:28,839 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T02:28:28,842 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T02:28:28,844 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T02:28:28,845 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T02:28:28,847 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T02:28:28,863 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T02:28:28,865 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T02:28:28,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:28:28,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:28:28,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,874 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c4730a2bacf8,32911,1731896907368, sessionid=0x10128e77d600000, setting cluster-up flag (Was=false) 2024-11-18T02:28:28,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,894 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T02:28:28,896 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:28,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:28,908 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T02:28:28,910 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:28,916 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T02:28:28,962 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(746): ClusterId : 003db97e-f57e-497e-8a52-9820c58ee093 2024-11-18T02:28:28,965 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T02:28:28,971 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T02:28:28,971 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T02:28:28,980 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T02:28:28,981 DEBUG [RS:0;c4730a2bacf8:44819 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6de75ef1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:28:28,990 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T02:28:28,995 DEBUG [RS:0;c4730a2bacf8:44819 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c4730a2bacf8:44819 2024-11-18T02:28:28,998 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T02:28:28,998 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T02:28:28,998 DEBUG [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T02:28:28,999 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T02:28:29,000 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(2659): reportForDuty to master=c4730a2bacf8,32911,1731896907368 with port=44819, startcode=1731896908062 2024-11-18T02:28:29,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
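Much of the startup traffic above is ZooKeeper bookkeeping: the ZKUtil/ZKWatcher lines show the master probing znodes such as /hbase/balancer, /hbase/switch/split and /hbase/switch/merge under quorum 127.0.0.1:64207 and treating their absence as normal. A minimal outside-looking-in sketch of the same kind of probe with the plain ZooKeeper client (connection string and paths copied from the log; this is not the ZKUtil code path itself):

```java
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class CheckHBaseZNodes {
    public static void main(String[] args) throws Exception {
        // Quorum address taken from the log above; adjust for a real cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64207", 30_000, event -> { });
        for (String path : new String[] {"/hbase/balancer", "/hbase/switch/split", "/hbase/switch/merge"}) {
            Stat stat = zk.exists(path, false);
            // A null Stat mirrors the "node does not exist (not necessarily an error)" messages.
            System.out.println(path + " -> " + (stat == null ? "absent" : "present"));
        }
        zk.close();
    }
}
```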
2024-11-18T02:28:29,014 DEBUG [RS:0;c4730a2bacf8:44819 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T02:28:29,015 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c4730a2bacf8,32911,1731896907368 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T02:28:29,024 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:28:29,024 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:28:29,025 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:28:29,025 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:28:29,025 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c4730a2bacf8:0, corePoolSize=10, maxPoolSize=10 2024-11-18T02:28:29,026 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,026 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:28:29,026 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,028 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731896939028 2024-11-18T02:28:29,030 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T02:28:29,031 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:28:29,032 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T02:28:29,032 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T02:28:29,037 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T02:28:29,037 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T02:28:29,038 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T02:28:29,038 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T02:28:29,039 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,040 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T02:28:29,039 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
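The hbase:meta descriptor printed above spells out each column family's attributes: VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, BLOCKSIZE and so on. As an illustration only (not the code path the master runs here, and the table name 'example' is made up), a family with the same attributes as the logged 'info' family can be described through the public client API roughly like this:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class BuildInfoLikeDescriptor {
    public static void main(String[] args) {
        // Mirrors the 'info' family attributes shown in the logged hbase:meta descriptor.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
                .build();
        TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))              // hypothetical table name
                .setColumnFamily(info)
                .build();
        System.out.println(table);
    }
}
```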
2024-11-18T02:28:29,044 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T02:28:29,045 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T02:28:29,045 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T02:28:29,048 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T02:28:29,048 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T02:28:29,052 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731896909050,5,FailOnTimeoutGroup] 2024-11-18T02:28:29,054 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731896909053,5,FailOnTimeoutGroup] 2024-11-18T02:28:29,054 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,055 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T02:28:29,056 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,056 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
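Both the log cleaner and the hfile cleaner chores above are assembled from the delegate classes named in the "Initialize cleaner=..." lines. A small classpath sanity check (a sketch one might use when a trimmed-down build is missing a jar; HBase itself does nothing of the sort here) can confirm each logged cleaner class is resolvable:

```java
public class CheckCleanerClasses {
    public static void main(String[] args) {
        // Cleaner delegate classes exactly as they appear in the log above.
        String[] cleaners = {
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner",
            "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner",
            "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner",
            "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner"
        };
        for (String name : cleaners) {
            try {
                Class.forName(name);
                System.out.println(name + " -> on classpath");
            } catch (ClassNotFoundException e) {
                System.out.println(name + " -> missing");
            }
        }
    }
}
```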
2024-11-18T02:28:29,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:28:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:28:29,068 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T02:28:29,068 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4 2024-11-18T02:28:29,098 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56641, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T02:28:29,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:28:29,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:28:29,101 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:28:29,105 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32911 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,107 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32911 {}] master.ServerManager(517): Registering 
regionserver=c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,109 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:28:29,112 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:28:29,112 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:28:29,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:28:29,116 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:28:29,116 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:28:29,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:28:29,120 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:28:29,121 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:28:29,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:28:29,124 DEBUG [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4 2024-11-18T02:28:29,124 DEBUG [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33317 2024-11-18T02:28:29,124 DEBUG [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T02:28:29,125 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:28:29,125 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:28:29,126 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:28:29,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740 2024-11-18T02:28:29,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:28:29,129 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740 2024-11-18T02:28:29,130 DEBUG [RS:0;c4730a2bacf8:44819 {}] zookeeper.ZKUtil(111): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,130 WARN [RS:0;c4730a2bacf8:44819 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:28:29,130 INFO [RS:0;c4730a2bacf8:44819 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:28:29,130 DEBUG [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,132 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c4730a2bacf8,44819,1731896908062] 2024-11-18T02:28:29,133 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:28:29,133 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:28:29,134 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
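Both FlushLargeStoresPolicy messages derive the per-family lower bound the way the message itself describes: the region's memstore flush size divided by the number of column families. A tiny sketch of that arithmetic, using only numbers that appear in this log (134217728 bytes and four families for master:store; 16777216 bytes per family for hbase:meta):

```java
public class FlushLowerBoundMath {
    public static void main(String[] args) {
        long masterStoreFlushSize = 134_217_728L; // flushSize logged for master:store (128 MB)
        int masterStoreFamilies = 4;              // info, proc, rs, state
        // 134217728 / 4 = 33554432, the flushSizeLowerBound (32.0 M) logged earlier for master:store.
        System.out.println(masterStoreFlushSize / masterStoreFamilies);

        long metaLowerBound = 16_777_216L;        // flushSizeLowerBound logged below for hbase:meta (16.0 M)
        int metaFamilies = 4;                     // info, ns, rep_barrier, table
        // Working backwards under the same rule, the meta region's flush size would be 4 * 16 MB = 64 MB.
        System.out.println(metaLowerBound * metaFamilies);
    }
}
```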
2024-11-18T02:28:29,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:28:29,141 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:28:29,142 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860009, jitterRate=0.09355820715427399}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:28:29,145 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731896909103Initializing all the Stores at 1731896909105 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731896909105Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731896909108 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731896909108Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731896909108Cleaning up temporary data from old regions at 1731896909133 (+25 ms)Region opened successfully at 1731896909145 (+12 ms) 2024-11-18T02:28:29,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:28:29,146 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:28:29,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:28:29,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:28:29,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:28:29,148 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:28:29,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731896909145Disabling compacts and flushes for region at 1731896909145Disabling writes for close at 1731896909146 (+1 
ms)Writing region close event to WAL at 1731896909147 (+1 ms)Closed at 1731896909147 2024-11-18T02:28:29,151 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:28:29,152 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T02:28:29,161 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T02:28:29,163 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T02:28:29,171 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:28:29,174 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T02:28:29,180 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T02:28:29,184 INFO [RS:0;c4730a2bacf8:44819 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:28:29,185 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,185 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T02:28:29,192 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T02:28:29,193 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
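The MemStoreFlusher line above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M; the two logged values imply a low-water factor of 0.95 (that factor is simply what the numbers work out to, not something read from configuration in this log). A trivial check of the arithmetic:

```java
public class MemStoreLowMarkMath {
    public static void main(String[] args) {
        double globalLimitMb = 880.0; // globalMemStoreLimit from the log
        double lowMarkMb = 836.0;     // globalMemStoreLimitLowMark from the log
        // 836 / 880 = 0.95: flushing kicks in before the hard limit is reached.
        System.out.println(lowMarkMb / globalLimitMb);
        System.out.println(globalLimitMb * 0.95); // 836.0
    }
}
```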
2024-11-18T02:28:29,193 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,193 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,194 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,194 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,194 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,194 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:28:29,194 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,194 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,194 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,195 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,195 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,195 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:28:29,195 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:28:29,195 DEBUG [RS:0;c4730a2bacf8:44819 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:28:29,196 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,196 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,197 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,197 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-18T02:28:29,197 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,197 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,44819,1731896908062-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:28:29,216 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T02:28:29,217 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,44819,1731896908062-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,218 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,218 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.Replication(171): c4730a2bacf8,44819,1731896908062 started 2024-11-18T02:28:29,237 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:29,237 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1482): Serving as c4730a2bacf8,44819,1731896908062, RpcServer on c4730a2bacf8/172.17.0.2:44819, sessionid=0x10128e77d600001 2024-11-18T02:28:29,238 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T02:28:29,238 DEBUG [RS:0;c4730a2bacf8:44819 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,239 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,44819,1731896908062' 2024-11-18T02:28:29,239 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T02:28:29,240 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T02:28:29,240 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T02:28:29,240 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T02:28:29,241 DEBUG [RS:0;c4730a2bacf8:44819 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,241 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,44819,1731896908062' 2024-11-18T02:28:29,241 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T02:28:29,241 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T02:28:29,242 DEBUG [RS:0;c4730a2bacf8:44819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T02:28:29,242 INFO [RS:0;c4730a2bacf8:44819 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T02:28:29,242 INFO [RS:0;c4730a2bacf8:44819 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-18T02:28:29,325 WARN [c4730a2bacf8:32911 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T02:28:29,351 INFO [RS:0;c4730a2bacf8:44819 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C44819%2C1731896908062, suffix=, logDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062, archiveDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs, maxLogs=32 2024-11-18T02:28:29,354 INFO [RS:0;c4730a2bacf8:44819 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896909354 2024-11-18T02:28:29,362 INFO [RS:0;c4730a2bacf8:44819 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896909354 2024-11-18T02:28:29,365 DEBUG [RS:0;c4730a2bacf8:44819 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:37655:37655)] 2024-11-18T02:28:29,577 DEBUG [c4730a2bacf8:32911 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T02:28:29,590 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,596 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,44819,1731896908062, state=OPENING 2024-11-18T02:28:29,601 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T02:28:29,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:29,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:28:29,605 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:28:29,605 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:28:29,606 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:28:29,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,44819,1731896908062}] 2024-11-18T02:28:29,782 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T02:28:29,785 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60739, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T02:28:29,795 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T02:28:29,795 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:28:29,799 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C44819%2C1731896908062.meta, suffix=.meta, logDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062, archiveDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs, maxLogs=32 2024-11-18T02:28:29,802 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.meta.1731896909801.meta 2024-11-18T02:28:29,810 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.meta.1731896909801.meta 2024-11-18T02:28:29,813 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:37655:37655)] 2024-11-18T02:28:29,814 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:28:29,815 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T02:28:29,819 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T02:28:29,823 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T02:28:29,828 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T02:28:29,828 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:28:29,828 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T02:28:29,829 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T02:28:29,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:28:29,834 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:28:29,834 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:28:29,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:28:29,837 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:28:29,837 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:28:29,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:28:29,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:28:29,840 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:28:29,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:28:29,843 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:28:29,843 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:29,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T02:28:29,844 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:28:29,845 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740 2024-11-18T02:28:29,848 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740 2024-11-18T02:28:29,850 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:28:29,850 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:28:29,851 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T02:28:29,854 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:28:29,856 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766026, jitterRate=-0.025948330760002136}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:28:29,856 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T02:28:29,858 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731896909829Writing region info on filesystem at 1731896909829Initializing all the Stores at 1731896909831 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731896909831Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731896909831Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731896909831Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731896909832 (+1 ms)Cleaning up temporary data from old regions at 1731896909850 (+18 ms)Running coprocessor post-open hooks at 1731896909856 (+6 ms)Region opened successfully at 1731896909858 (+2 ms) 2024-11-18T02:28:29,865 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731896909773 2024-11-18T02:28:29,877 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T02:28:29,877 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T02:28:29,879 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,881 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,44819,1731896908062, state=OPEN 2024-11-18T02:28:29,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:28:29,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:28:29,889 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:28:29,889 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:28:29,889 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:29,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T02:28:29,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,44819,1731896908062 in 282 msec 2024-11-18T02:28:29,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T02:28:29,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 736 msec 2024-11-18T02:28:29,903 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:28:29,903 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T02:28:29,923 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:28:29,924 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,44819,1731896908062, seqNum=-1] 2024-11-18T02:28:29,944 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:28:29,946 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33129, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:28:29,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0230 sec 2024-11-18T02:28:29,968 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731896909968, completionTime=-1 2024-11-18T02:28:29,972 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T02:28:29,972 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T02:28:30,004 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T02:28:30,004 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731896970004 2024-11-18T02:28:30,004 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731897030004 2024-11-18T02:28:30,005 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 32 msec 2024-11-18T02:28:30,007 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,32911,1731896907368-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:30,008 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,32911,1731896907368-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:30,008 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,32911,1731896907368-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:30,009 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c4730a2bacf8:32911, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T02:28:30,010 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:30,010 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T02:28:30,017 DEBUG [master/c4730a2bacf8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T02:28:30,038 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.827sec 2024-11-18T02:28:30,039 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T02:28:30,040 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T02:28:30,041 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T02:28:30,042 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T02:28:30,042 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T02:28:30,043 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,32911,1731896907368-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:28:30,043 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,32911,1731896907368-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T02:28:30,051 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T02:28:30,053 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T02:28:30,053 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,32911,1731896907368-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T02:28:30,076 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@be54b53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:28:30,079 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-18T02:28:30,079 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-18T02:28:30,084 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c4730a2bacf8,32911,-1 for getting cluster id 2024-11-18T02:28:30,087 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T02:28:30,096 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '003db97e-f57e-497e-8a52-9820c58ee093' 2024-11-18T02:28:30,099 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T02:28:30,099 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "003db97e-f57e-497e-8a52-9820c58ee093" 2024-11-18T02:28:30,099 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d77fc09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:28:30,099 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c4730a2bacf8,32911,-1] 2024-11-18T02:28:30,102 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T02:28:30,104 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:28:30,105 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48304, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T02:28:30,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@233000b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:28:30,109 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:28:30,117 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,44819,1731896908062, seqNum=-1] 2024-11-18T02:28:30,118 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:28:30,121 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47236, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:28:30,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:30,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:28:30,151 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T02:28:30,156 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T02:28:30,184 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c4730a2bacf8,32911,1731896907368 2024-11-18T02:28:30,188 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@206c080d 2024-11-18T02:28:30,189 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T02:28:30,194 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48312, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T02:28:30,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32911 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T02:28:30,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32911 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T02:28:30,201 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32911 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:28:30,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32911 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-18T02:28:30,216 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T02:28:30,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32911 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-18T02:28:30,219 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:30,221 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T02:28:30,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32911 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:28:30,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741835_1011 (size=389) 2024-11-18T02:28:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741835_1011 (size=389) 2024-11-18T02:28:30,277 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c7375e78eb1d87681d3ddf687edd1b96, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4 2024-11-18T02:28:30,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741836_1012 (size=72) 2024-11-18T02:28:30,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741836_1012 (size=72) 2024-11-18T02:28:30,287 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:28:30,287 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing c7375e78eb1d87681d3ddf687edd1b96, disabling compactions & flushes 2024-11-18T02:28:30,288 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:28:30,288 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:28:30,288 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. after waiting 0 ms 2024-11-18T02:28:30,288 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:28:30,288 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:28:30,288 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c7375e78eb1d87681d3ddf687edd1b96: Waiting for close lock at 1731896910287Disabling compacts and flushes for region at 1731896910287Disabling writes for close at 1731896910288 (+1 ms)Writing region close event to WAL at 1731896910288Closed at 1731896910288 2024-11-18T02:28:30,290 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T02:28:30,295 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731896910290"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731896910290"}]},"ts":"1731896910290"} 2024-11-18T02:28:30,301 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T02:28:30,302 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T02:28:30,305 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731896910303"}]},"ts":"1731896910303"} 2024-11-18T02:28:30,309 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-18T02:28:30,311 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c7375e78eb1d87681d3ddf687edd1b96, ASSIGN}] 2024-11-18T02:28:30,313 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c7375e78eb1d87681d3ddf687edd1b96, ASSIGN 2024-11-18T02:28:30,315 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c7375e78eb1d87681d3ddf687edd1b96, ASSIGN; state=OFFLINE, location=c4730a2bacf8,44819,1731896908062; forceNewPlan=false, retain=false 2024-11-18T02:28:30,467 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c7375e78eb1d87681d3ddf687edd1b96, regionState=OPENING, regionLocation=c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:30,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c7375e78eb1d87681d3ddf687edd1b96, ASSIGN because future has completed 2024-11-18T02:28:30,473 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c7375e78eb1d87681d3ddf687edd1b96, server=c4730a2bacf8,44819,1731896908062}] 2024-11-18T02:28:30,634 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 
2024-11-18T02:28:30,634 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c7375e78eb1d87681d3ddf687edd1b96, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:28:30,635 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,635 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:28:30,635 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,635 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,637 INFO [StoreOpener-c7375e78eb1d87681d3ddf687edd1b96-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,639 INFO [StoreOpener-c7375e78eb1d87681d3ddf687edd1b96-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c7375e78eb1d87681d3ddf687edd1b96 columnFamilyName info 2024-11-18T02:28:30,639 DEBUG [StoreOpener-c7375e78eb1d87681d3ddf687edd1b96-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:28:30,640 INFO [StoreOpener-c7375e78eb1d87681d3ddf687edd1b96-1 {}] regionserver.HStore(327): Store=c7375e78eb1d87681d3ddf687edd1b96/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:28:30,641 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,642 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,643 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,643 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,643 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,646 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,649 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:28:30,649 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c7375e78eb1d87681d3ddf687edd1b96; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781420, jitterRate=-0.006373405456542969}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T02:28:30,650 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:30,650 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c7375e78eb1d87681d3ddf687edd1b96: Running coprocessor pre-open hook at 1731896910635Writing region info on filesystem at 1731896910635Initializing all the Stores at 1731896910637 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731896910637Cleaning up temporary data from old regions at 1731896910643 (+6 ms)Running coprocessor post-open hooks at 1731896910650 (+7 ms)Region opened successfully at 1731896910650 2024-11-18T02:28:30,652 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96., pid=6, masterSystemTime=1731896910627 2024-11-18T02:28:30,656 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:28:30,656 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:28:30,657 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c7375e78eb1d87681d3ddf687edd1b96, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,44819,1731896908062 2024-11-18T02:28:30,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c7375e78eb1d87681d3ddf687edd1b96, server=c4730a2bacf8,44819,1731896908062 because future has completed 2024-11-18T02:28:30,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T02:28:30,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c7375e78eb1d87681d3ddf687edd1b96, server=c4730a2bacf8,44819,1731896908062 in 191 msec 2024-11-18T02:28:30,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T02:28:30,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c7375e78eb1d87681d3ddf687edd1b96, ASSIGN in 356 msec 2024-11-18T02:28:30,673 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T02:28:30,673 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731896910673"}]},"ts":"1731896910673"} 2024-11-18T02:28:30,677 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-18T02:28:30,678 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T02:28:30,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 474 msec 2024-11-18T02:28:35,332 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-18T02:28:35,374 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T02:28:35,375 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-18T02:28:37,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:28:37,771 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T02:28:37,773 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T02:28:37,773 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T02:28:37,774 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:28:37,774 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T02:28:37,775 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T02:28:37,775 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T02:28:40,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32911 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:28:40,335 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-18T02:28:40,338 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-18T02:28:40,344 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-18T02:28:40,345 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 
2024-11-18T02:28:40,346 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896920345 2024-11-18T02:28:40,354 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:28:40,355 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:28:40,355 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:28:40,355 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:28:40,355 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:28:40,355 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896909354 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896920345 2024-11-18T02:28:40,357 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:37655:37655)] 2024-11-18T02:28:40,357 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896909354 is not closed yet, will try archiving it next time 2024-11-18T02:28:40,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741833_1009 (size=451) 2024-11-18T02:28:40,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741833_1009 (size=451) 2024-11-18T02:28:40,361 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896909354 to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs/c4730a2bacf8%2C44819%2C1731896908062.1731896909354 2024-11-18T02:28:40,366 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96., hostname=c4730a2bacf8,44819,1731896908062, seqNum=2] 2024-11-18T02:28:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44819 {}] regionserver.HRegion(8855): Flush requested on c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:28:52,404 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c7375e78eb1d87681d3ddf687edd1b96 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T02:28:52,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/7d084845715c4dd5a2898fa762ac77aa is 1080, key is row0001/info:/1731896920369/Put/seqid=0 2024-11-18T02:28:52,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741838_1014 (size=12509) 2024-11-18T02:28:52,517 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741838_1014 (size=12509) 2024-11-18T02:28:52,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/7d084845715c4dd5a2898fa762ac77aa 2024-11-18T02:28:52,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/7d084845715c4dd5a2898fa762ac77aa as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/7d084845715c4dd5a2898fa762ac77aa 2024-11-18T02:28:52,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/7d084845715c4dd5a2898fa762ac77aa, entries=7, sequenceid=11, filesize=12.2 K 2024-11-18T02:28:52,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c7375e78eb1d87681d3ddf687edd1b96 in 207ms, sequenceid=11, compaction requested=false 2024-11-18T02:28:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c7375e78eb1d87681d3ddf687edd1b96: 2024-11-18T02:28:56,381 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
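Each flush in this log follows the same shape: a "Flushing <region> 1/1 column families, dataSize=... heapSize=..." record, a temporary HFile written under .tmp/, a commit into the info/ store directory, and a "Finished flush ..." summary carrying the byte counts, duration, and sequence id. Purely as an illustration of how those summaries could be pulled out of a capture like this one, here is a minimal Python sketch; it assumes one record per line (as in the raw test output before wrapping), and the regex and the flush_summary name are invented for this note, not an HBase API.

import re

# Matches the "Finished flush" summaries quoted above, e.g.
# "Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304,
#  currentSize=0 B/0 for c7375e78eb1d87681d3ddf687edd1b96 in 207ms,
#  sequenceid=11, compaction requested=false"
FLUSH_RE = re.compile(
    r"Finished flush of dataSize ~[\d.]+ [KMG]?B/(?P<data_bytes>\d+), "
    r"heapSize ~[\d.]+ [KMG]?B/(?P<heap_bytes>\d+), .*?for (?P<region>\w+) "
    r"in (?P<ms>\d+)ms, sequenceid=(?P<seqid>\d+), "
    r"compaction requested=(?P<requested>\w+)"
)

def flush_summary(log_lines):
    """Collect per-flush stats (bytes, duration, seqid) from raw log lines."""
    flushes = []
    for line in log_lines:
        for m in FLUSH_RE.finditer(line):
            flushes.append({
                "region": m.group("region"),
                "data_bytes": int(m.group("data_bytes")),
                "heap_bytes": int(m.group("heap_bytes")),
                "duration_ms": int(m.group("ms")),
                "sequenceid": int(m.group("seqid")),
                "compaction_requested": m.group("requested") == "true",
            })
    return flushes

Run over the records above, this would yield data_bytes=7532, duration_ms=207, sequenceid=11 for the first flush.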
2024-11-18T02:29:00,413 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896940412 2024-11-18T02:29:00,621 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:00,621 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:00,622 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:00,622 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:00,622 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:00,622 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:00,622 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896920345 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896940412 2024-11-18T02:29:00,624 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37655:37655),(127.0.0.1/127.0.0.1:37667:37667)] 2024-11-18T02:29:00,624 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896920345 is not closed yet, will try archiving it next time 2024-11-18T02:29:00,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741837_1013 (size=12399) 2024-11-18T02:29:00,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741837_1013 (size=12399) 2024-11-18T02:29:00,827 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:03,032 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:05,237 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:07,441 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:07,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44819 {}] regionserver.HRegion(8855): Flush requested on c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:29:07,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c7375e78eb1d87681d3ddf687edd1b96 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T02:29:07,644 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:07,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/6dbfec308fb34dbcba0b742325b2d59c is 1080, key is row0008/info:/1731896934401/Put/seqid=0 2024-11-18T02:29:07,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741840_1016 (size=12509) 2024-11-18T02:29:07,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741840_1016 (size=12509) 2024-11-18T02:29:07,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/6dbfec308fb34dbcba0b742325b2d59c 2024-11-18T02:29:07,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/6dbfec308fb34dbcba0b742325b2d59c as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/6dbfec308fb34dbcba0b742325b2d59c 2024-11-18T02:29:07,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/6dbfec308fb34dbcba0b742325b2d59c, entries=7, sequenceid=21, filesize=12.2 K 2024-11-18T02:29:07,880 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:07,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c7375e78eb1d87681d3ddf687edd1b96 in 
439ms, sequenceid=21, compaction requested=false 2024-11-18T02:29:07,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c7375e78eb1d87681d3ddf687edd1b96: 2024-11-18T02:29:07,881 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-18T02:29:07,881 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:29:07,882 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/7d084845715c4dd5a2898fa762ac77aa because midkey is the same as first or last row 2024-11-18T02:29:09,646 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:10,195 INFO [master/c4730a2bacf8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T02:29:10,196 INFO [master/c4730a2bacf8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T02:29:11,850 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:11,852 WARN [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:11,852 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C44819%2C1731896908062:(num 1731896940412) roll requested 2024-11-18T02:29:11,853 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896951853 2024-11-18T02:29:12,061 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK], DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK]] 2024-11-18T02:29:12,061 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:12,061 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:12,062 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:12,062 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:12,062 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
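The roll requests in this stretch come from the WAL's slow-sync accounting: every "Slow sync cost: N ms" line is one sync the WAL considered slow, and the WARN records quote the two triggers seen in this run, a count threshold ("count=8, threshold=5" just above) and a per-sync time threshold ("time=5007 ms, threshold=5000 ms" further below). The following is only a sketch that re-derives those decisions from the log text itself; the roll_triggers name is made up, the thresholds are hard-coded to mirror this run (they are configurable in HBase), and the real counter is kept per roll interval rather than over a whole file.

import re

# One entry per "Slow sync cost: N ms" record emitted by the WAL.
SLOW_SYNC_RE = re.compile(r"Slow sync cost: (\d+) ms")

TIME_THRESHOLD_MS = 5000   # mirrors "time=5007 ms, threshold=5000 ms"
COUNT_THRESHOLD = 5        # mirrors "count=8, threshold=5"

def roll_triggers(log_lines):
    """Summarise why this slice of log requested WAL rolls."""
    costs = [int(m.group(1))
             for line in log_lines
             for m in SLOW_SYNC_RE.finditer(line)]
    return {
        "slow_sync_count": len(costs),
        "worst_sync_ms": max(costs, default=0),
        "count_trigger": len(costs) > COUNT_THRESHOLD,
        "time_trigger": any(c > TIME_THRESHOLD_MS for c in costs),
    }

Either trigger alone is enough: the 02:29:11 roll above fired on the count of ~200 ms syncs, while the later 5007 ms and 5001 ms syncs fire on the time threshold.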
2024-11-18T02:29:12,062 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896940412 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896951853 2024-11-18T02:29:12,063 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:37655:37655)] 2024-11-18T02:29:12,063 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896940412 is not closed yet, will try archiving it next time 2024-11-18T02:29:12,063 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896920345 to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs/c4730a2bacf8%2C44819%2C1731896908062.1731896920345 2024-11-18T02:29:12,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741839_1015 (size=7739) 2024-11-18T02:29:12,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741839_1015 (size=7739) 2024-11-18T02:29:14,054 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:15,635 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c7375e78eb1d87681d3ddf687edd1b96, had cached 0 bytes from a total of 25018 2024-11-18T02:29:16,258 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:18,462 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:20,666 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], 
DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:22,668 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T02:29:22,669 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896962668 2024-11-18T02:29:26,381 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T02:29:27,679 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:27,680 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:27,680 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C44819%2C1731896908062:(num 1731896962668) roll requested 2024-11-18T02:29:27,681 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:27,681 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:27,681 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:27,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:27,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:27,681 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896951853 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896962668 2024-11-18T02:29:27,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741841_1017 (size=4753) 2024-11-18T02:29:27,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741841_1017 (size=4753) 2024-11-18T02:29:27,693 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:37655:37655)] 2024-11-18T02:29:27,693 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896951853 is not closed yet, will try archiving it next time 2024-11-18T02:29:27,693 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896967693 2024-11-18T02:29:32,697 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:32,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44819 {}] regionserver.HRegion(8855): Flush requested on c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:29:32,697 WARN [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:32,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c7375e78eb1d87681d3ddf687edd1b96 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T02:29:32,713 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:32,713 WARN [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:34,698 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T02:29:37,700 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:37,700 WARN [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:37,700 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:37,700 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:37,701 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:37,701 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:37,701 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:37,703 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896962668 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896967693 2024-11-18T02:29:37,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741842_1018 (size=1569) 2024-11-18T02:29:37,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741842_1018 (size=1569) 2024-11-18T02:29:37,714 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:37655:37655)] 2024-11-18T02:29:37,714 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896962668 is not closed yet, will try archiving it next time 2024-11-18T02:29:37,717 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C44819%2C1731896908062:(num 1731896967693) roll requested 2024-11-18T02:29:37,718 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896977718 2024-11-18T02:29:37,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/2ce0e022da524e30a8b51a5993424cb6 is 1080, key is row0015/info:/1731896949444/Put/seqid=0 2024-11-18T02:29:37,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741844_1020 (size=12509) 2024-11-18T02:29:37,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741844_1020 (size=12509) 2024-11-18T02:29:37,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/2ce0e022da524e30a8b51a5993424cb6 2024-11-18T02:29:37,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/2ce0e022da524e30a8b51a5993424cb6 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/2ce0e022da524e30a8b51a5993424cb6 2024-11-18T02:29:37,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/2ce0e022da524e30a8b51a5993424cb6, entries=7, sequenceid=31, filesize=12.2 K 2024-11-18T02:29:42,748 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5021 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:42,748 WARN [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5021 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:42,765 INFO [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:42,765 WARN [FSHLog-0-hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4-prefix:c4730a2bacf8,44819,1731896908062 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46563,DS-47ee40a7-2d96-4def-8a9f-5c0eb5cb6b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:33973,DS-8f3740a1-2acd-47b9-aef2-59369f00b4ce,DISK]] 2024-11-18T02:29:42,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c7375e78eb1d87681d3ddf687edd1b96 in 10068ms, sequenceid=31, compaction requested=true 2024-11-18T02:29:42,766 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c7375e78eb1d87681d3ddf687edd1b96: 2024-11-18T02:29:42,766 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,766 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-18T02:29:42,766 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:29:42,766 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/7d084845715c4dd5a2898fa762ac77aa because midkey is the same as first or last row 2024-11-18T02:29:42,768 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,768 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,768 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c7375e78eb1d87681d3ddf687edd1b96:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T02:29:42,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:29:42,771 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896967693 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896977718 2024-11-18T02:29:42,771 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:29:42,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741843_1019 (size=438) 2024-11-18T02:29:42,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741843_1019 (size=438) 2024-11-18T02:29:42,776 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:29:42,778 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.HStore(1541): c7375e78eb1d87681d3ddf687edd1b96/info is initiating minor compaction (all files) 2024-11-18T02:29:42,778 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896940412 to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs/c4730a2bacf8%2C44819%2C1731896908062.1731896940412 2024-11-18T02:29:42,779 INFO [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c7375e78eb1d87681d3ddf687edd1b96/info in TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 
2024-11-18T02:29:42,780 INFO [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/7d084845715c4dd5a2898fa762ac77aa, hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/6dbfec308fb34dbcba0b742325b2d59c, hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/2ce0e022da524e30a8b51a5993424cb6] into tmpdir=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp, totalSize=36.6 K 2024-11-18T02:29:42,780 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896951853 to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs/c4730a2bacf8%2C44819%2C1731896908062.1731896951853 2024-11-18T02:29:42,782 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896962668 to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs/c4730a2bacf8%2C44819%2C1731896908062.1731896962668 2024-11-18T02:29:42,783 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7d084845715c4dd5a2898fa762ac77aa, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731896920369 2024-11-18T02:29:42,784 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6dbfec308fb34dbcba0b742325b2d59c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731896934401 2024-11-18T02:29:42,784 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896967693 to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs/c4730a2bacf8%2C44819%2C1731896908062.1731896967693 2024-11-18T02:29:42,785 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2ce0e022da524e30a8b51a5993424cb6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731896949444 2024-11-18T02:29:42,808 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:37655:37655)] 2024-11-18T02:29:42,809 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C44819%2C1731896908062:(num 1731896977718) roll requested 2024-11-18T02:29:42,809 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896982809 2024-11-18T02:29:42,846 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): 
interrupted 2024-11-18T02:29:42,846 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,847 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,847 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,847 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,847 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896977718 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896982809 2024-11-18T02:29:42,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741845_1021 (size=93) 2024-11-18T02:29:42,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741845_1021 (size=93) 2024-11-18T02:29:42,858 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896977718 to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs/c4730a2bacf8%2C44819%2C1731896908062.1731896977718 2024-11-18T02:29:42,871 INFO [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c7375e78eb1d87681d3ddf687edd1b96#info#compaction#3 average throughput is 2.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:29:42,873 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/b2946b01b01548698715ba6063990fa9 is 1080, key is row0001/info:/1731896920369/Put/seqid=0 2024-11-18T02:29:42,877 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37655:37655),(127.0.0.1/127.0.0.1:37667:37667)] 2024-11-18T02:29:42,877 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44819%2C1731896908062.1731896982877 2024-11-18T02:29:42,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741847_1023 (size=27710) 2024-11-18T02:29:42,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741847_1023 (size=27710) 2024-11-18T02:29:42,921 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/b2946b01b01548698715ba6063990fa9 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/b2946b01b01548698715ba6063990fa9 2024-11-18T02:29:42,929 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,931 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,931 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,932 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,932 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:29:42,932 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896982809 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896982877 2024-11-18T02:29:42,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741846_1022 (size=1258) 2024-11-18T02:29:42,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741846_1022 (size=1258) 2024-11-18T02:29:42,949 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:37655:37655)] 2024-11-18T02:29:42,949 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/WALs/c4730a2bacf8,44819,1731896908062/c4730a2bacf8%2C44819%2C1731896908062.1731896982809 is not closed yet, will try archiving it next time 2024-11-18T02:29:42,954 INFO [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c7375e78eb1d87681d3ddf687edd1b96/info of c7375e78eb1d87681d3ddf687edd1b96 into b2946b01b01548698715ba6063990fa9(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
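The compaction that completes here selects all three flushed HFiles (3 x 12.2 K, 37527 bytes per the ExploringCompactionPolicy record) and rewrites them into the single 27.1 K file b2946b01b01548698715ba6063990fa9. A rough sketch of pairing those records to report the size change, written only against the messages quoted above (assumes one record per line; compaction_report is an invented helper, not HBase code):

import re

# Inputs:  "Compacting <hfile>, keycount=7, bloomtype=ROW, size=12.2 K, ..."
# Output:  "Completed compaction of 3 (all) file(s) ... into <hfile>(size=27.1 K), ..."
IN_RE = re.compile(r"Compacting (\w+), keycount=(\d+), .*?size=([\d.]+) K")
OUT_RE = re.compile(r"Completed compaction of (\d+) .*?into (\w+)\(size=([\d.]+) K\)")

def compaction_report(log_lines):
    """Pair compaction inputs with the resulting file and report the size change."""
    inputs, result = [], None
    for line in log_lines:
        inputs.extend((f, int(k), float(s)) for f, k, s in IN_RE.findall(line))
        m = OUT_RE.search(line)
        if m:
            result = (int(m.group(1)), m.group(2), float(m.group(3)))
    if result is None or not inputs:
        return None
    n_files, out_file, out_kb = result
    in_kb = sum(s for _, _, s in inputs)
    return {
        "files_compacted": n_files,
        "input_kb": round(in_kb, 1),
        "output_file": out_file,
        "output_kb": out_kb,
        "saved_pct": round(100 * (1 - out_kb / in_kb)),
    }

For the records above this gives 36.6 K in and 27.1 K out, roughly a 26% reduction, which is why the split-policy checks that follow test sumSize=27.1 K.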
2024-11-18T02:29:42,954 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c7375e78eb1d87681d3ddf687edd1b96: 2024-11-18T02:29:42,957 INFO [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96., storeName=c7375e78eb1d87681d3ddf687edd1b96/info, priority=13, startTime=1731896982768; duration=0sec 2024-11-18T02:29:42,957 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T02:29:42,957 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:29:42,957 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/b2946b01b01548698715ba6063990fa9 because midkey is the same as first or last row 2024-11-18T02:29:42,958 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T02:29:42,958 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:29:42,958 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/b2946b01b01548698715ba6063990fa9 because midkey is the same as first or last row 2024-11-18T02:29:42,958 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T02:29:42,958 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:29:42,958 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/b2946b01b01548698715ba6063990fa9 because midkey is the same as first or last row 2024-11-18T02:29:42,958 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:29:42,958 DEBUG [RS:0;c4730a2bacf8:44819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c7375e78eb1d87681d3ddf687edd1b96:info 2024-11-18T02:29:54,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44819 {}] regionserver.HRegion(8855): Flush requested on c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:29:54,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c7375e78eb1d87681d3ddf687edd1b96 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T02:29:54,915 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/337f40d58ea144b699ee1624658a5f55 is 1080, key is row0022/info:/1731896982879/Put/seqid=0 2024-11-18T02:29:54,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741849_1025 (size=12509) 2024-11-18T02:29:54,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741849_1025 (size=12509) 2024-11-18T02:29:54,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/337f40d58ea144b699ee1624658a5f55 2024-11-18T02:29:54,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/337f40d58ea144b699ee1624658a5f55 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/337f40d58ea144b699ee1624658a5f55 2024-11-18T02:29:54,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/337f40d58ea144b699ee1624658a5f55, entries=7, sequenceid=42, filesize=12.2 K 2024-11-18T02:29:54,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c7375e78eb1d87681d3ddf687edd1b96 in 45ms, sequenceid=42, compaction requested=false 2024-11-18T02:29:54,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c7375e78eb1d87681d3ddf687edd1b96: 2024-11-18T02:29:54,953 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-18T02:29:54,953 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:29:54,953 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/b2946b01b01548698715ba6063990fa9 because midkey is the same as first or last row 2024-11-18T02:29:56,381 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
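Each flush and compaction above ends with the same pair of split-policy records: the size check passes (sumSize runs from 24.4 K up to 39.3 K against sizeToCheck=16.0 K), yet the split is refused every time because the candidate file's midkey is the same as its first or last row. A last small tally over those two messages, for completeness (split_decisions is an invented name, and the regexes target the exact wording above):

import re

SIZE_RE = re.compile(r"Should split because region size is big enough "
                     r"sumSize=([\d.]+) K, sizeToCheck=([\d.]+) K")
BLOCK_RE = re.compile(r"cannot split \S+ because midkey is the same as first or last row")

def split_decisions(log_lines):
    """Count size-policy passes versus midkey refusals in this log."""
    passed = blocked = 0
    for line in log_lines:
        passed += len(SIZE_RE.findall(line))
        blocked += len(BLOCK_RE.findall(line))
    return {"size_policy_passed": passed, "blocked_by_midkey": blocked}

In this slice the two counters come out equal, so the region is never actually split before the minicluster shutdown that follows.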
2024-11-18T02:30:00,635 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c7375e78eb1d87681d3ddf687edd1b96, had cached 0 bytes from a total of 40219 2024-11-18T02:30:02,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T02:30:02,919 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T02:30:02,919 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-11-18T02:30:02,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:02,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:02,925 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T02:30:02,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T02:30:02,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=888866681, stopped=false 2024-11-18T02:30:02,925 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c4730a2bacf8,32911,1731896907368 2024-11-18T02:30:02,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:02,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:02,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:02,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:02,928 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:30:02,928 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T02:30:02,928 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:02,928 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:02,928 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:02,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:02,929 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c4730a2bacf8,44819,1731896908062' ***** 2024-11-18T02:30:02,929 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T02:30:02,929 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T02:30:02,929 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T02:30:02,929 INFO [RS:0;c4730a2bacf8:44819 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T02:30:02,930 INFO [RS:0;c4730a2bacf8:44819 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T02:30:02,930 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(3091): Received CLOSE for c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:30:02,930 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(959): stopping server c4730a2bacf8,44819,1731896908062 2024-11-18T02:30:02,930 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:30:02,931 INFO [RS:0;c4730a2bacf8:44819 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c4730a2bacf8:44819. 2024-11-18T02:30:02,931 DEBUG [RS:0;c4730a2bacf8:44819 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:02,931 DEBUG [RS:0;c4730a2bacf8:44819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:02,931 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c7375e78eb1d87681d3ddf687edd1b96, disabling compactions & flushes 2024-11-18T02:30:02,931 INFO [RS:0;c4730a2bacf8:44819 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T02:30:02,931 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:30:02,931 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T02:30:02,931 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T02:30:02,931 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:30:02,931 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. after waiting 0 ms 2024-11-18T02:30:02,931 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T02:30:02,931 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:30:02,931 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c7375e78eb1d87681d3ddf687edd1b96 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-18T02:30:02,932 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T02:30:02,932 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:30:02,932 DEBUG [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1325): Online Regions={c7375e78eb1d87681d3ddf687edd1b96=TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T02:30:02,932 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:30:02,932 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:30:02,932 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:30:02,932 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:30:02,932 DEBUG [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c7375e78eb1d87681d3ddf687edd1b96 2024-11-18T02:30:02,932 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-18T02:30:02,939 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/ced782620b274ff1a46b84f64359eef7 is 1080, key is row0029/info:/1731896996909/Put/seqid=0 2024-11-18T02:30:02,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741850_1026 (size=8193) 2024-11-18T02:30:02,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741850_1026 (size=8193) 2024-11-18T02:30:02,951 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/ced782620b274ff1a46b84f64359eef7 2024-11-18T02:30:02,956 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/info/b5e792e5664f4b41a6eebe8b787ce303 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96./info:regioninfo/1731896910657/Put/seqid=0 2024-11-18T02:30:02,962 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/.tmp/info/ced782620b274ff1a46b84f64359eef7 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/ced782620b274ff1a46b84f64359eef7 2024-11-18T02:30:02,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741851_1027 (size=7016) 2024-11-18T02:30:02,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741851_1027 (size=7016) 2024-11-18T02:30:02,965 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/info/b5e792e5664f4b41a6eebe8b787ce303 2024-11-18T02:30:02,972 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/ced782620b274ff1a46b84f64359eef7, entries=3, sequenceid=48, filesize=8.0 K 2024-11-18T02:30:02,974 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for c7375e78eb1d87681d3ddf687edd1b96 in 43ms, sequenceid=48, compaction requested=true 2024-11-18T02:30:02,975 
DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/7d084845715c4dd5a2898fa762ac77aa, hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/6dbfec308fb34dbcba0b742325b2d59c, hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/2ce0e022da524e30a8b51a5993424cb6] to archive 2024-11-18T02:30:02,978 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T02:30:02,981 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/7d084845715c4dd5a2898fa762ac77aa to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/7d084845715c4dd5a2898fa762ac77aa 2024-11-18T02:30:02,984 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/6dbfec308fb34dbcba0b742325b2d59c to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/6dbfec308fb34dbcba0b742325b2d59c 2024-11-18T02:30:02,986 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/2ce0e022da524e30a8b51a5993424cb6 to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/info/2ce0e022da524e30a8b51a5993424cb6 2024-11-18T02:30:02,994 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/ns/778900cde8ff45c7ac6c2df8b6d13ca0 is 43, key is default/ns:d/1731896909951/Put/seqid=0 2024-11-18T02:30:03,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741852_1028 (size=5153) 2024-11-18T02:30:03,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741852_1028 (size=5153) 
2024-11-18T02:30:03,003 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/ns/778900cde8ff45c7ac6c2df8b6d13ca0 2024-11-18T02:30:03,000 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c4730a2bacf8:32911 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-18T02:30:03,005 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [7d084845715c4dd5a2898fa762ac77aa=12509, 6dbfec308fb34dbcba0b742325b2d59c=12509, 2ce0e022da524e30a8b51a5993424cb6=12509] 2024-11-18T02:30:03,010 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/default/TestLogRolling-testSlowSyncLogRolling/c7375e78eb1d87681d3ddf687edd1b96/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-18T02:30:03,015 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:30:03,015 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c7375e78eb1d87681d3ddf687edd1b96: Waiting for close lock at 1731897002930Running coprocessor pre-close hooks at 1731897002931 (+1 ms)Disabling compacts and flushes for region at 1731897002931Disabling writes for close at 1731897002931Obtaining lock to block concurrent updates at 1731897002931Preparing flush snapshotting stores in c7375e78eb1d87681d3ddf687edd1b96 at 1731897002931Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731897002932 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 
at 1731897002933 (+1 ms)Flushing c7375e78eb1d87681d3ddf687edd1b96/info: creating writer at 1731897002933Flushing c7375e78eb1d87681d3ddf687edd1b96/info: appending metadata at 1731897002938 (+5 ms)Flushing c7375e78eb1d87681d3ddf687edd1b96/info: closing flushed file at 1731897002938Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11c80f1d: reopening flushed file at 1731897002961 (+23 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for c7375e78eb1d87681d3ddf687edd1b96 in 43ms, sequenceid=48, compaction requested=true at 1731897002974 (+13 ms)Writing region close event to WAL at 1731897003005 (+31 ms)Running coprocessor post-close hooks at 1731897003013 (+8 ms)Closed at 1731897003015 (+2 ms) 2024-11-18T02:30:03,016 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731896910196.c7375e78eb1d87681d3ddf687edd1b96. 2024-11-18T02:30:03,038 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/table/8fefa6dab71e40849b3feb1d944c5dbb is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731896910673/Put/seqid=0 2024-11-18T02:30:03,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741853_1029 (size=5396) 2024-11-18T02:30:03,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741853_1029 (size=5396) 2024-11-18T02:30:03,046 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/table/8fefa6dab71e40849b3feb1d944c5dbb 2024-11-18T02:30:03,056 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/info/b5e792e5664f4b41a6eebe8b787ce303 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/info/b5e792e5664f4b41a6eebe8b787ce303 2024-11-18T02:30:03,065 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/info/b5e792e5664f4b41a6eebe8b787ce303, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T02:30:03,066 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/ns/778900cde8ff45c7ac6c2df8b6d13ca0 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/ns/778900cde8ff45c7ac6c2df8b6d13ca0 2024-11-18T02:30:03,076 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/ns/778900cde8ff45c7ac6c2df8b6d13ca0, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T02:30:03,077 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/.tmp/table/8fefa6dab71e40849b3feb1d944c5dbb as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/table/8fefa6dab71e40849b3feb1d944c5dbb 2024-11-18T02:30:03,086 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/table/8fefa6dab71e40849b3feb1d944c5dbb, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T02:30:03,088 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 156ms, sequenceid=11, compaction requested=false 2024-11-18T02:30:03,095 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T02:30:03,096 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:30:03,096 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:03,096 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897002932Running coprocessor pre-close hooks at 1731897002932Disabling compacts and flushes for region at 1731897002932Disabling writes for close at 1731897002932Obtaining lock to block concurrent updates at 1731897002932Preparing flush snapshotting stores in 1588230740 at 1731897002932Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731897002932Flushing stores of hbase:meta,,1.1588230740 at 1731897002933 (+1 ms)Flushing 1588230740/info: creating writer at 1731897002933Flushing 1588230740/info: appending metadata at 1731897002955 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731897002955Flushing 1588230740/ns: creating writer at 1731897002973 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731897002993 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731897002993Flushing 1588230740/table: creating writer at 1731897003014 (+21 ms)Flushing 1588230740/table: appending metadata at 1731897003038 (+24 ms)Flushing 1588230740/table: closing flushed file at 1731897003038Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47d7b7e1: reopening flushed file at 1731897003055 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d2bd421: reopening flushed file at 1731897003065 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35c23bd8: reopening flushed file at 1731897003076 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 156ms, sequenceid=11, compaction requested=false at 1731897003088 (+12 ms)Writing region close event to WAL at 1731897003090 (+2 ms)Running coprocessor post-close hooks at 1731897003096 (+6 ms)Closed at 1731897003096 2024-11-18T02:30:03,097 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:03,132 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(976): stopping server c4730a2bacf8,44819,1731896908062; all regions closed. 2024-11-18T02:30:03,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,135 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741834_1010 (size=3066) 2024-11-18T02:30:03,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741834_1010 (size=3066) 2024-11-18T02:30:03,142 DEBUG [RS:0;c4730a2bacf8:44819 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs 2024-11-18T02:30:03,143 INFO [RS:0;c4730a2bacf8:44819 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C44819%2C1731896908062.meta:.meta(num 1731896909801) 2024-11-18T02:30:03,143 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,143 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,143 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,143 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,144 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741848_1024 (size=12695) 2024-11-18T02:30:03,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741848_1024 (size=12695) 2024-11-18T02:30:03,151 DEBUG [RS:0;c4730a2bacf8:44819 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/oldWALs 2024-11-18T02:30:03,151 INFO [RS:0;c4730a2bacf8:44819 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C44819%2C1731896908062:(num 1731896982877) 2024-11-18T02:30:03,151 DEBUG [RS:0;c4730a2bacf8:44819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:03,151 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:30:03,152 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:30:03,152 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.ChoreService(370): Chore service for: regionserver/c4730a2bacf8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T02:30:03,152 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:30:03,152 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T02:30:03,153 INFO [RS:0;c4730a2bacf8:44819 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44819 2024-11-18T02:30:03,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:30:03,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c4730a2bacf8,44819,1731896908062 2024-11-18T02:30:03,158 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:30:03,159 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c4730a2bacf8,44819,1731896908062] 2024-11-18T02:30:03,163 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c4730a2bacf8,44819,1731896908062 already deleted, retry=false 2024-11-18T02:30:03,163 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c4730a2bacf8,44819,1731896908062 expired; onlineServers=0 2024-11-18T02:30:03,163 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c4730a2bacf8,32911,1731896907368' ***** 2024-11-18T02:30:03,163 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T02:30:03,163 INFO [M:0;c4730a2bacf8:32911 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:30:03,163 INFO [M:0;c4730a2bacf8:32911 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:30:03,164 DEBUG [M:0;c4730a2bacf8:32911 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T02:30:03,164 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T02:30:03,164 DEBUG [M:0;c4730a2bacf8:32911 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T02:30:03,164 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731896909053 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731896909053,5,FailOnTimeoutGroup] 2024-11-18T02:30:03,164 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731896909050 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731896909050,5,FailOnTimeoutGroup] 2024-11-18T02:30:03,164 INFO [M:0;c4730a2bacf8:32911 {}] hbase.ChoreService(370): Chore service for: master/c4730a2bacf8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T02:30:03,164 INFO [M:0;c4730a2bacf8:32911 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:30:03,164 DEBUG [M:0;c4730a2bacf8:32911 {}] master.HMaster(1795): Stopping service threads 2024-11-18T02:30:03,164 INFO [M:0;c4730a2bacf8:32911 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T02:30:03,164 INFO [M:0;c4730a2bacf8:32911 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:30:03,165 INFO [M:0;c4730a2bacf8:32911 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T02:30:03,165 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T02:30:03,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T02:30:03,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:03,166 DEBUG [M:0;c4730a2bacf8:32911 {}] zookeeper.ZKUtil(347): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T02:30:03,166 WARN [M:0;c4730a2bacf8:32911 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T02:30:03,167 INFO [M:0;c4730a2bacf8:32911 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/.lastflushedseqids 2024-11-18T02:30:03,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741854_1030 (size=130) 2024-11-18T02:30:03,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741854_1030 (size=130) 2024-11-18T02:30:03,183 INFO [M:0;c4730a2bacf8:32911 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T02:30:03,183 INFO [M:0;c4730a2bacf8:32911 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T02:30:03,183 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:30:03,183 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:03,183 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:03,183 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:30:03,184 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:03,184 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-18T02:30:03,201 INFO [regionserver/c4730a2bacf8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:30:03,210 DEBUG [M:0;c4730a2bacf8:32911 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1eff8f4604114b22a69708203c09933c is 82, key is hbase:meta,,1/info:regioninfo/1731896909879/Put/seqid=0 2024-11-18T02:30:03,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741855_1031 (size=5672) 2024-11-18T02:30:03,217 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1eff8f4604114b22a69708203c09933c 2024-11-18T02:30:03,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741855_1031 (size=5672) 2024-11-18T02:30:03,251 DEBUG [M:0;c4730a2bacf8:32911 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8ccc6e2690334115b357def86e2d52d6 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731896910680/Put/seqid=0 2024-11-18T02:30:03,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741856_1032 (size=6247) 2024-11-18T02:30:03,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741856_1032 (size=6247) 2024-11-18T02:30:03,260 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8ccc6e2690334115b357def86e2d52d6 2024-11-18T02:30:03,262 INFO [RS:0;c4730a2bacf8:44819 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:30:03,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-18T02:30:03,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44819-0x10128e77d600001, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:03,262 INFO [RS:0;c4730a2bacf8:44819 {}] regionserver.HRegionServer(1031): Exiting; stopping=c4730a2bacf8,44819,1731896908062; zookeeper connection closed. 2024-11-18T02:30:03,268 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5dc3afde {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5dc3afde 2024-11-18T02:30:03,269 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T02:30:03,270 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8ccc6e2690334115b357def86e2d52d6 2024-11-18T02:30:03,298 DEBUG [M:0;c4730a2bacf8:32911 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f4b362ec5afa4c3b9cf4afb197c91272 is 69, key is c4730a2bacf8,44819,1731896908062/rs:state/1731896909110/Put/seqid=0 2024-11-18T02:30:03,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741857_1033 (size=5156) 2024-11-18T02:30:03,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741857_1033 (size=5156) 2024-11-18T02:30:03,307 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f4b362ec5afa4c3b9cf4afb197c91272 2024-11-18T02:30:03,333 DEBUG [M:0;c4730a2bacf8:32911 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fcf347a3ff104247a117f83d4f0e2ef1 is 52, key is load_balancer_on/state:d/1731896910147/Put/seqid=0 2024-11-18T02:30:03,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741858_1034 (size=5056) 2024-11-18T02:30:03,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741858_1034 (size=5056) 2024-11-18T02:30:03,340 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fcf347a3ff104247a117f83d4f0e2ef1 2024-11-18T02:30:03,348 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1eff8f4604114b22a69708203c09933c as 
hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1eff8f4604114b22a69708203c09933c 2024-11-18T02:30:03,355 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1eff8f4604114b22a69708203c09933c, entries=8, sequenceid=59, filesize=5.5 K 2024-11-18T02:30:03,357 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8ccc6e2690334115b357def86e2d52d6 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8ccc6e2690334115b357def86e2d52d6 2024-11-18T02:30:03,363 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8ccc6e2690334115b357def86e2d52d6 2024-11-18T02:30:03,363 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8ccc6e2690334115b357def86e2d52d6, entries=6, sequenceid=59, filesize=6.1 K 2024-11-18T02:30:03,365 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f4b362ec5afa4c3b9cf4afb197c91272 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f4b362ec5afa4c3b9cf4afb197c91272 2024-11-18T02:30:03,371 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f4b362ec5afa4c3b9cf4afb197c91272, entries=1, sequenceid=59, filesize=5.0 K 2024-11-18T02:30:03,372 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fcf347a3ff104247a117f83d4f0e2ef1 as hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fcf347a3ff104247a117f83d4f0e2ef1 2024-11-18T02:30:03,378 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fcf347a3ff104247a117f83d4f0e2ef1, entries=1, sequenceid=59, filesize=4.9 K 2024-11-18T02:30:03,380 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 196ms, sequenceid=59, compaction requested=false 2024-11-18T02:30:03,382 INFO [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T02:30:03,382 DEBUG [M:0;c4730a2bacf8:32911 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897003183Disabling compacts and flushes for region at 1731897003183Disabling writes for close at 1731897003184 (+1 ms)Obtaining lock to block concurrent updates at 1731897003184Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731897003184Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731897003185 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731897003186 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731897003186Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731897003209 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731897003209Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731897003225 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731897003250 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731897003250Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731897003270 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731897003297 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731897003298 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731897003315 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731897003332 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731897003332Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3609ff7c: reopening flushed file at 1731897003347 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a60e4b3: reopening flushed file at 1731897003356 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42a88eaf: reopening flushed file at 1731897003364 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e5d51b0: reopening flushed file at 1731897003371 (+7 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 196ms, sequenceid=59, compaction requested=false at 1731897003380 (+9 ms)Writing region close event to WAL at 1731897003382 (+2 ms)Closed at 1731897003382 2024-11-18T02:30:03,383 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,383 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,383 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,383 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,383 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:03,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46563 is added to blk_1073741830_1006 (size=27973) 2024-11-18T02:30:03,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33973 is added to blk_1073741830_1006 (size=27973) 2024-11-18T02:30:03,386 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T02:30:03,386 INFO [M:0;c4730a2bacf8:32911 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-18T02:30:03,387 INFO [M:0;c4730a2bacf8:32911 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32911
2024-11-18T02:30:03,387 INFO [M:0;c4730a2bacf8:32911 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-18T02:30:03,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-18T02:30:03,489 INFO [M:0;c4730a2bacf8:32911 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-18T02:30:03,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32911-0x10128e77d600000, quorum=127.0.0.1:64207, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-18T02:30:03,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1467625d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-18T02:30:03,498 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-18T02:30:03,498 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-18T02:30:03,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-18T02:30:03,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.log.dir/,STOPPED}
2024-11-18T02:30:03,502 WARN [BP-1085259333-172.17.0.2-1731896904097 heartbeating to localhost/127.0.0.1:33317 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-18T02:30:03,502 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-18T02:30:03,502 WARN [BP-1085259333-172.17.0.2-1731896904097 heartbeating to localhost/127.0.0.1:33317 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1085259333-172.17.0.2-1731896904097 (Datanode Uuid 15fc9655-10b9-43cd-a1fc-f70e1b50c326) service to localhost/127.0.0.1:33317 2024-11-18T02:30:03,502 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:03,503 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/data/data3/current/BP-1085259333-172.17.0.2-1731896904097 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:03,503 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/data/data4/current/BP-1085259333-172.17.0.2-1731896904097 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:03,504 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:03,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c2fdbac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:03,506 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:03,506 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:03,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:03,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:03,508 WARN [BP-1085259333-172.17.0.2-1731896904097 heartbeating to localhost/127.0.0.1:33317 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:03,508 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:30:03,508 WARN [BP-1085259333-172.17.0.2-1731896904097 heartbeating to localhost/127.0.0.1:33317 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1085259333-172.17.0.2-1731896904097 (Datanode Uuid d0d61583-8fe2-420c-bddd-55f1f47f3ea9) service to localhost/127.0.0.1:33317 2024-11-18T02:30:03,508 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:03,509 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/data/data1/current/BP-1085259333-172.17.0.2-1731896904097 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:03,509 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/cluster_bd86d47e-54ed-f17a-fcf4-4c7201f79e76/data/data2/current/BP-1085259333-172.17.0.2-1731896904097 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:03,510 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:03,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:30:03,520 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:03,520 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:03,520 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:03,520 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:03,530 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T02:30:03,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T02:30:03,572 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33317 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33317 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: regionserver/c4730a2bacf8:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@efc635d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33317 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/c4730a2bacf8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/c4730a2bacf8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33317 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33317 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:33317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
IPC Parameter Sending Thread for localhost/127.0.0.1:33317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=203 (was 228), ProcessCount=11 (was 11), AvailableMemoryMB=3038 (was 3670) 2024-11-18T02:30:03,579 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=203, ProcessCount=11, AvailableMemoryMB=3038 2024-11-18T02:30:03,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T02:30:03,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.log.dir so I do NOT create it in target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776 2024-11-18T02:30:03,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4c2df9f-cbbf-11ed-8712-b041e6d0aad8/hadoop.tmp.dir so I do NOT create it in target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776 2024-11-18T02:30:03,580 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757, deleteOnExit=true 2024-11-18T02:30:03,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T02:30:03,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/test.cache.data in system properties and HBase conf 2024-11-18T02:30:03,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T02:30:03,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.log.dir in system properties and HBase conf 2024-11-18T02:30:03,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T02:30:03,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T02:30:03,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T02:30:03,581 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-18T02:30:03,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:30:03,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/nfs.dump.dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/java.io.tmpdir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T02:30:03,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T02:30:03,598 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:30:03,680 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:03,687 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:03,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:03,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:03,688 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:30:03,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:03,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@687b21ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:03,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c69b616{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:03,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ba7fb41{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/java.io.tmpdir/jetty-localhost-43797-hadoop-hdfs-3_4_1-tests_jar-_-any-6593596090533125320/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:30:03,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f841e9{HTTP/1.1, (http/1.1)}{localhost:43797} 2024-11-18T02:30:03,808 INFO [Time-limited test {}] server.Server(415): Started @102264ms 2024-11-18T02:30:03,823 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:30:03,901 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:03,907 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:03,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:03,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:03,908 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:30:03,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74fa283c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:03,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9809f98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:04,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27105e7c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/java.io.tmpdir/jetty-localhost-37181-hadoop-hdfs-3_4_1-tests_jar-_-any-10137993109550485256/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:04,032 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e23c0c8{HTTP/1.1, (http/1.1)}{localhost:37181} 2024-11-18T02:30:04,032 INFO [Time-limited test {}] server.Server(415): Started @102489ms 2024-11-18T02:30:04,034 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:30:04,073 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:04,077 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:04,077 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:04,077 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:04,077 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:30:04,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b67c504{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:04,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11d14b2b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:04,152 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/data/data1/current/BP-342457722-172.17.0.2-1731897003617/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:04,153 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/data/data2/current/BP-342457722-172.17.0.2-1731897003617/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:04,178 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:30:04,181 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaedcd23b012c14da with lease ID 0x9f630e769fcc031e: Processing first storage report for DS-1a58f7d5-0b7a-4099-acae-b2e37f6a339d from datanode DatanodeRegistration(127.0.0.1:39023, datanodeUuid=a509a722-4b12-4237-bdf9-a6495eb9766b, infoPort=43111, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1491318107;c=1731897003617) 2024-11-18T02:30:04,181 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaedcd23b012c14da with lease ID 0x9f630e769fcc031e: from storage DS-1a58f7d5-0b7a-4099-acae-b2e37f6a339d node DatanodeRegistration(127.0.0.1:39023, datanodeUuid=a509a722-4b12-4237-bdf9-a6495eb9766b, infoPort=43111, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1491318107;c=1731897003617), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:04,182 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaedcd23b012c14da with lease ID 0x9f630e769fcc031e: Processing first storage report for DS-c542af1e-84f5-4f68-8545-816a1660f82b from datanode DatanodeRegistration(127.0.0.1:39023, datanodeUuid=a509a722-4b12-4237-bdf9-a6495eb9766b, infoPort=43111, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1491318107;c=1731897003617) 2024-11-18T02:30:04,182 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaedcd23b012c14da with lease ID 0x9f630e769fcc031e: from storage DS-c542af1e-84f5-4f68-8545-816a1660f82b node DatanodeRegistration(127.0.0.1:39023, datanodeUuid=a509a722-4b12-4237-bdf9-a6495eb9766b, infoPort=43111, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1491318107;c=1731897003617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:04,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6951519{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/java.io.tmpdir/jetty-localhost-40349-hadoop-hdfs-3_4_1-tests_jar-_-any-1257096639393032061/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:04,201 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ce533a5{HTTP/1.1, (http/1.1)}{localhost:40349} 2024-11-18T02:30:04,201 INFO [Time-limited test {}] server.Server(415): Started @102657ms 2024-11-18T02:30:04,203 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T02:30:04,319 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/data/data3/current/BP-342457722-172.17.0.2-1731897003617/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:04,319 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/data/data4/current/BP-342457722-172.17.0.2-1731897003617/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:04,344 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:30:04,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb5fa62d6a17add9f with lease ID 0x9f630e769fcc031f: Processing first storage report for DS-00dbac7f-c79d-43ef-9a4c-57baa62aa79f from datanode DatanodeRegistration(127.0.0.1:36721, datanodeUuid=4754971d-0a12-48fb-b4b1-cd9770793f11, infoPort=41769, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=1491318107;c=1731897003617) 2024-11-18T02:30:04,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5fa62d6a17add9f with lease ID 0x9f630e769fcc031f: from storage DS-00dbac7f-c79d-43ef-9a4c-57baa62aa79f node DatanodeRegistration(127.0.0.1:36721, datanodeUuid=4754971d-0a12-48fb-b4b1-cd9770793f11, infoPort=41769, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=1491318107;c=1731897003617), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:04,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb5fa62d6a17add9f with lease ID 0x9f630e769fcc031f: Processing first storage report for DS-c6945e9a-d550-4f71-837a-597fa18d452d from datanode DatanodeRegistration(127.0.0.1:36721, datanodeUuid=4754971d-0a12-48fb-b4b1-cd9770793f11, infoPort=41769, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=1491318107;c=1731897003617) 2024-11-18T02:30:04,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5fa62d6a17add9f with lease ID 0x9f630e769fcc031f: from storage DS-c6945e9a-d550-4f71-837a-597fa18d452d node DatanodeRegistration(127.0.0.1:36721, datanodeUuid=4754971d-0a12-48fb-b4b1-cd9770793f11, infoPort=41769, infoSecurePort=0, ipcPort=35565, storageInfo=lv=-57;cid=testClusterID;nsid=1491318107;c=1731897003617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:04,434 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776 2024-11-18T02:30:04,438 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/zookeeper_0, clientPort=51966, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T02:30:04,439 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51966 2024-11-18T02:30:04,440 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:04,442 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:04,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:30:04,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:30:04,456 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076 with version=8 2024-11-18T02:30:04,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase-staging 2024-11-18T02:30:04,459 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:30:04,460 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:04,460 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:04,460 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:30:04,460 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:04,460 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:30:04,460 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T02:30:04,460 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:30:04,461 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38951 2024-11-18T02:30:04,463 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38951 connecting to ZooKeeper ensemble=127.0.0.1:51966 2024-11-18T02:30:04,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389510x0, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:30:04,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38951-0x10128e8fc250000 connected 2024-11-18T02:30:04,503 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:04,505 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:04,509 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:04,509 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076, hbase.cluster.distributed=false 2024-11-18T02:30:04,511 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:30:04,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38951 2024-11-18T02:30:04,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38951 2024-11-18T02:30:04,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38951 2024-11-18T02:30:04,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38951 2024-11-18T02:30:04,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38951 2024-11-18T02:30:04,534 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:30:04,535 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:04,535 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:04,535 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:30:04,535 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:04,535 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:30:04,535 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T02:30:04,535 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:30:04,537 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41993 2024-11-18T02:30:04,539 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41993 connecting to ZooKeeper ensemble=127.0.0.1:51966 2024-11-18T02:30:04,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:04,543 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:04,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419930x0, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:30:04,552 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:04,552 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41993-0x10128e8fc250001 connected 2024-11-18T02:30:04,552 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T02:30:04,556 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T02:30:04,557 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T02:30:04,558 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:30:04,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41993 2024-11-18T02:30:04,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41993 2024-11-18T02:30:04,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41993 2024-11-18T02:30:04,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41993 2024-11-18T02:30:04,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41993 
2024-11-18T02:30:04,588 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c4730a2bacf8:38951 2024-11-18T02:30:04,588 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:04,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:04,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:04,591 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:04,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T02:30:04,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,594 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T02:30:04,595 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c4730a2bacf8,38951,1731897004459 from backup master directory 2024-11-18T02:30:04,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:04,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:04,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:04,596 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T02:30:04,596 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:04,602 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/hbase.id] with ID: 63883645-89c0-4ff6-af8c-d079fb9ca46b 2024-11-18T02:30:04,603 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/.tmp/hbase.id 2024-11-18T02:30:04,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:30:04,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:30:04,616 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/.tmp/hbase.id]:[hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/hbase.id] 2024-11-18T02:30:04,633 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:04,633 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T02:30:04,635 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-18T02:30:04,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:30:04,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:30:04,650 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:30:04,651 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T02:30:04,652 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:04,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:30:04,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:30:04,663 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store 2024-11-18T02:30:04,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:30:04,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:30:04,671 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:04,671 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:30:04,671 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:04,671 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:04,672 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:30:04,672 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:04,672 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T02:30:04,672 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897004671Disabling compacts and flushes for region at 1731897004671Disabling writes for close at 1731897004672 (+1 ms)Writing region close event to WAL at 1731897004672Closed at 1731897004672 2024-11-18T02:30:04,673 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/.initializing 2024-11-18T02:30:04,673 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/WALs/c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:04,677 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C38951%2C1731897004459, suffix=, logDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/WALs/c4730a2bacf8,38951,1731897004459, archiveDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/oldWALs, maxLogs=10 2024-11-18T02:30:04,678 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C38951%2C1731897004459.1731897004677 2024-11-18T02:30:04,684 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/WALs/c4730a2bacf8,38951,1731897004459/c4730a2bacf8%2C38951%2C1731897004459.1731897004677 2024-11-18T02:30:04,688 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41769:41769),(127.0.0.1/127.0.0.1:43111:43111)] 2024-11-18T02:30:04,692 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:30:04,692 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:04,692 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,692 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T02:30:04,696 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,697 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:04,697 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T02:30:04,698 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,699 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:04,699 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T02:30:04,701 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:04,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T02:30:04,703 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:04,704 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,705 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,705 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,707 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,707 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,708 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T02:30:04,710 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:04,713 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:30:04,713 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781144, jitterRate=-0.006724074482917786}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T02:30:04,715 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731897004692Initializing all the Stores at 1731897004693 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897004694 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897004694Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897004694Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897004694Cleaning up temporary data from old regions at 1731897004707 (+13 ms)Region opened successfully at 1731897004715 (+8 ms) 2024-11-18T02:30:04,715 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T02:30:04,720 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8930b16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:30:04,722 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T02:30:04,722 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T02:30:04,722 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T02:30:04,722 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T02:30:04,723 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T02:30:04,723 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T02:30:04,723 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T02:30:04,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T02:30:04,728 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T02:30:04,730 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T02:30:04,730 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T02:30:04,731 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T02:30:04,732 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T02:30:04,733 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T02:30:04,734 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T02:30:04,735 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T02:30:04,736 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T02:30:04,739 DEBUG 
[master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T02:30:04,742 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T02:30:04,744 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T02:30:04,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:04,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:04,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,747 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c4730a2bacf8,38951,1731897004459, sessionid=0x10128e8fc250000, setting cluster-up flag (Was=false) 2024-11-18T02:30:04,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,759 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T02:30:04,760 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:04,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:04,772 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T02:30:04,773 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:04,775 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T02:30:04,777 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:04,777 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T02:30:04,778 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T02:30:04,778 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c4730a2bacf8,38951,1731897004459 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T02:30:04,779 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:04,779 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:04,779 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:04,780 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:04,780 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c4730a2bacf8:0, corePoolSize=10, maxPoolSize=10 2024-11-18T02:30:04,780 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,780 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:30:04,780 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T02:30:04,781 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731897034781 2024-11-18T02:30:04,781 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T02:30:04,781 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T02:30:04,781 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T02:30:04,781 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T02:30:04,781 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T02:30:04,781 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T02:30:04,781 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T02:30:04,782 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:04,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T02:30:04,782 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T02:30:04,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T02:30:04,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T02:30:04,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T02:30:04,783 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897004782,5,FailOnTimeoutGroup] 2024-11-18T02:30:04,783 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897004783,5,FailOnTimeoutGroup] 2024-11-18T02:30:04,783 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,783 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T02:30:04,783 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,783 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,783 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,783 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T02:30:04,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:30:04,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:30:04,792 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T02:30:04,792 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076 2024-11-18T02:30:04,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:30:04,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:30:04,803 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:04,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:30:04,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:30:04,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:04,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:30:04,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:30:04,809 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:04,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:30:04,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:30:04,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:04,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:30:04,813 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:30:04,813 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:04,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:04,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:30:04,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740 2024-11-18T02:30:04,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740 2024-11-18T02:30:04,816 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:30:04,817 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:30:04,817 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
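The FlushLargeStoresPolicy entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor, so the lower bound falls back to the region memstore flush size divided by the number of column families (16.0 M here). A minimal Java sketch, assuming the standard HBase client descriptor API and an illustrative table name and value, of how a user table could pin this bound in its own descriptor:

    // Hedged sketch: table name "example" and the 16 MB value are illustrative.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushBoundExample {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same property the log reports as unset for hbase:meta.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }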
2024-11-18T02:30:04,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:30:04,821 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:30:04,822 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762082, jitterRate=-0.03096294403076172}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:30:04,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731897004803Initializing all the Stores at 1731897004804 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897004804Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897004804Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897004804Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897004804Cleaning up temporary data from old regions at 1731897004817 (+13 ms)Region opened successfully at 1731897004823 (+6 ms) 2024-11-18T02:30:04,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:30:04,823 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:30:04,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:30:04,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:30:04,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:30:04,823 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:04,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897004823Disabling compacts and flushes for region at 1731897004823Disabling writes for close at 1731897004823Writing region 
close event to WAL at 1731897004823Closed at 1731897004823 2024-11-18T02:30:04,825 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:04,825 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T02:30:04,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T02:30:04,827 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:30:04,828 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T02:30:04,870 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(746): ClusterId : 63883645-89c0-4ff6-af8c-d079fb9ca46b 2024-11-18T02:30:04,871 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T02:30:04,873 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T02:30:04,873 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T02:30:04,876 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T02:30:04,876 DEBUG [RS:0;c4730a2bacf8:41993 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f00d9b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:30:04,890 DEBUG [RS:0;c4730a2bacf8:41993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c4730a2bacf8:41993 2024-11-18T02:30:04,890 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T02:30:04,890 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T02:30:04,890 DEBUG [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T02:30:04,891 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(2659): reportForDuty to master=c4730a2bacf8,38951,1731897004459 with port=41993, startcode=1731897004534 2024-11-18T02:30:04,891 DEBUG [RS:0;c4730a2bacf8:41993 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T02:30:04,894 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43553, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T02:30:04,895 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38951 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:04,895 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38951 {}] master.ServerManager(517): Registering regionserver=c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:04,897 DEBUG [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076 2024-11-18T02:30:04,897 DEBUG [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33521 2024-11-18T02:30:04,897 DEBUG [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T02:30:04,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:30:04,900 DEBUG [RS:0;c4730a2bacf8:41993 {}] zookeeper.ZKUtil(111): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:04,900 WARN [RS:0;c4730a2bacf8:41993 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:30:04,900 INFO [RS:0;c4730a2bacf8:41993 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:04,901 DEBUG [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/WALs/c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:04,901 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c4730a2bacf8,41993,1731897004534] 2024-11-18T02:30:04,906 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T02:30:04,911 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T02:30:04,914 INFO [RS:0;c4730a2bacf8:41993 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:30:04,914 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
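The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, roughly 95% of the limit, which appears consistent with the default lower-limit factor. A hedged configuration sketch, assuming the property names hbase.regionserver.global.memstore.size and hbase.regionserver.global.memstore.size.lower.limit (the fractions shown are illustrative defaults, not values taken from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of region server heap usable by all memstores (resolves to 880 M above).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of that limit (836 M is about 95% of 880 M above).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
      }
    }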
2024-11-18T02:30:04,916 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T02:30:04,918 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T02:30:04,918 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,918 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:04,919 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:30:04,919 DEBUG [RS:0;c4730a2bacf8:41993 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:30:04,919 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T02:30:04,919 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,919 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,919 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,919 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,919 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,41993,1731897004534-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:30:04,935 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T02:30:04,936 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,41993,1731897004534-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,936 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,936 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.Replication(171): c4730a2bacf8,41993,1731897004534 started 2024-11-18T02:30:04,952 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:04,952 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1482): Serving as c4730a2bacf8,41993,1731897004534, RpcServer on c4730a2bacf8/172.17.0.2:41993, sessionid=0x10128e8fc250001 2024-11-18T02:30:04,953 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T02:30:04,953 DEBUG [RS:0;c4730a2bacf8:41993 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:04,953 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,41993,1731897004534' 2024-11-18T02:30:04,953 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T02:30:04,954 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T02:30:04,954 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T02:30:04,954 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T02:30:04,954 DEBUG [RS:0;c4730a2bacf8:41993 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:04,954 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,41993,1731897004534' 2024-11-18T02:30:04,954 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T02:30:04,955 DEBUG 
[RS:0;c4730a2bacf8:41993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T02:30:04,955 DEBUG [RS:0;c4730a2bacf8:41993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T02:30:04,955 INFO [RS:0;c4730a2bacf8:41993 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T02:30:04,955 INFO [RS:0;c4730a2bacf8:41993 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T02:30:04,979 WARN [c4730a2bacf8:38951 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T02:30:05,058 INFO [RS:0;c4730a2bacf8:41993 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C41993%2C1731897004534, suffix=, logDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/WALs/c4730a2bacf8,41993,1731897004534, archiveDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/oldWALs, maxLogs=32 2024-11-18T02:30:05,060 INFO [RS:0;c4730a2bacf8:41993 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C41993%2C1731897004534.1731897005060 2024-11-18T02:30:05,067 INFO [RS:0;c4730a2bacf8:41993 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/WALs/c4730a2bacf8,41993,1731897004534/c4730a2bacf8%2C41993%2C1731897004534.1731897005060 2024-11-18T02:30:05,070 DEBUG [RS:0;c4730a2bacf8:41993 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41769:41769),(127.0.0.1/127.0.0.1:43111:43111)] 2024-11-18T02:30:05,229 DEBUG [c4730a2bacf8:38951 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T02:30:05,230 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:05,232 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,41993,1731897004534, state=OPENING 2024-11-18T02:30:05,242 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T02:30:05,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:05,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:05,248 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:30:05,248 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:05,248 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:05,249 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,41993,1731897004534}] 2024-11-18T02:30:05,403 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T02:30:05,405 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37931, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T02:30:05,409 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T02:30:05,410 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:05,412 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C41993%2C1731897004534.meta, suffix=.meta, logDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/WALs/c4730a2bacf8,41993,1731897004534, archiveDir=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/oldWALs, maxLogs=32 2024-11-18T02:30:05,414 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C41993%2C1731897004534.meta.1731897005413.meta 2024-11-18T02:30:05,421 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/WALs/c4730a2bacf8,41993,1731897004534/c4730a2bacf8%2C41993%2C1731897004534.meta.1731897005413.meta 2024-11-18T02:30:05,425 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43111:43111),(127.0.0.1/127.0.0.1:41769:41769)] 2024-11-18T02:30:05,426 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:30:05,426 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T02:30:05,426 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T02:30:05,427 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
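The WAL entries above show FSHLogProvider being instantiated with blocksize=256 MB, rollsize=128 MB and maxLogs=32; as I understand it, the roll size is the WAL block size multiplied by the roll multiplier (0.5 by default). A hedged Java sketch of the corresponding configuration, assuming these property names carry over to your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSettings {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                 // FSHLogProvider, as logged above
        conf.setInt("hbase.regionserver.maxlogs", 32);                 // maxLogs=32
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // rollsize = blocksize * multiplier
        return conf;
      }
    }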
2024-11-18T02:30:05,427 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T02:30:05,427 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:05,427 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T02:30:05,427 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T02:30:05,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:30:05,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:30:05,430 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:05,430 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:05,430 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:30:05,431 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:30:05,431 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:05,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:05,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:30:05,433 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:30:05,433 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:05,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:05,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:30:05,434 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:30:05,434 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:05,435 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
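The store-opening entries above repeat the column-family attributes of hbase:meta (ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, three versions for 'info'). A minimal sketch, assuming the standard ColumnFamilyDescriptorBuilder API, of how an equivalent family definition would look in client code (the family name is illustrative):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamily {
      public static ColumnFamilyDescriptor infoLike() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .build();
      }
    }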
2024-11-18T02:30:05,435 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:30:05,436 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740 2024-11-18T02:30:05,437 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740 2024-11-18T02:30:05,438 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:30:05,438 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:30:05,439 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T02:30:05,441 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:30:05,441 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750900, jitterRate=-0.04518221318721771}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:30:05,442 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T02:30:05,443 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731897005427Writing region info on filesystem at 1731897005427Initializing all the Stores at 1731897005428 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897005428Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897005428Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897005428Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897005428Cleaning up temporary data from old regions at 1731897005438 (+10 ms)Running coprocessor post-open hooks at 1731897005442 (+4 ms)Region opened successfully at 1731897005443 (+1 ms) 2024-11-18T02:30:05,444 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731897005402 2024-11-18T02:30:05,447 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T02:30:05,447 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T02:30:05,449 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:05,450 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,41993,1731897004534, state=OPEN 2024-11-18T02:30:05,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:30:05,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:30:05,455 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:05,455 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:05,455 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:05,458 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T02:30:05,458 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,41993,1731897004534 in 207 msec 2024-11-18T02:30:05,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T02:30:05,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 633 msec 2024-11-18T02:30:05,463 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:05,463 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T02:30:05,464 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:30:05,464 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,41993,1731897004534, seqNum=-1] 2024-11-18T02:30:05,465 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:30:05,466 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36619, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:30:05,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 695 msec 2024-11-18T02:30:05,473 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731897005473, completionTime=-1 2024-11-18T02:30:05,473 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T02:30:05,473 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T02:30:05,475 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T02:30:05,475 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731897065475 2024-11-18T02:30:05,475 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731897125475 2024-11-18T02:30:05,475 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T02:30:05,475 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,38951,1731897004459-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:05,475 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,38951,1731897004459-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:05,476 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,38951,1731897004459-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:05,476 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c4730a2bacf8:38951, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T02:30:05,476 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:05,476 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:05,477 DEBUG [master/c4730a2bacf8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T02:30:05,480 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.884sec 2024-11-18T02:30:05,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T02:30:05,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T02:30:05,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T02:30:05,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T02:30:05,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T02:30:05,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,38951,1731897004459-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:30:05,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,38951,1731897004459-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T02:30:05,483 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T02:30:05,484 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T02:30:05,484 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,38951,1731897004459-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T02:30:05,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56ff2226, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:05,571 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c4730a2bacf8,38951,-1 for getting cluster id 2024-11-18T02:30:05,571 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T02:30:05,574 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '63883645-89c0-4ff6-af8c-d079fb9ca46b' 2024-11-18T02:30:05,575 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T02:30:05,575 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "63883645-89c0-4ff6-af8c-d079fb9ca46b" 2024-11-18T02:30:05,575 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7047af98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:05,575 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c4730a2bacf8,38951,-1] 2024-11-18T02:30:05,575 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T02:30:05,576 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:05,578 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53286, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T02:30:05,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d607c51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:05,579 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:30:05,580 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,41993,1731897004534, seqNum=-1] 2024-11-18T02:30:05,581 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:30:05,583 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52604, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:30:05,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:05,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:05,589 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T02:30:05,590 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T02:30:05,590 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T02:30:05,590 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:05,590 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:05,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:05,591 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T02:30:05,591 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T02:30:05,591 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1555122662, stopped=false 2024-11-18T02:30:05,591 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c4730a2bacf8,38951,1731897004459 2024-11-18T02:30:05,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:05,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:05,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:05,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:05,595 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:30:05,595 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T02:30:05,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:05,596 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:05,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:05,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:05,596 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c4730a2bacf8,41993,1731897004534' ***** 2024-11-18T02:30:05,596 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T02:30:05,596 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T02:30:05,596 INFO [RS:0;c4730a2bacf8:41993 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T02:30:05,596 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T02:30:05,596 INFO [RS:0;c4730a2bacf8:41993 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T02:30:05,597 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(959): stopping server c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:05,597 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:30:05,597 INFO [RS:0;c4730a2bacf8:41993 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c4730a2bacf8:41993. 2024-11-18T02:30:05,597 DEBUG [RS:0;c4730a2bacf8:41993 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:05,597 DEBUG [RS:0;c4730a2bacf8:41993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:05,597 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-18T02:30:05,597 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T02:30:05,597 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T02:30:05,597 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T02:30:05,597 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T02:30:05,597 DEBUG [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T02:30:05,598 DEBUG [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T02:30:05,598 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:30:05,598 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:30:05,598 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:30:05,598 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:30:05,598 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:30:05,598 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T02:30:05,616 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740/.tmp/ns/2ee9c9201b7240a98431a9c0d0e48ee9 is 43, key is default/ns:d/1731897005467/Put/seqid=0 2024-11-18T02:30:05,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741835_1011 (size=5153) 2024-11-18T02:30:05,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741835_1011 (size=5153) 2024-11-18T02:30:05,623 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740/.tmp/ns/2ee9c9201b7240a98431a9c0d0e48ee9 2024-11-18T02:30:05,631 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740/.tmp/ns/2ee9c9201b7240a98431a9c0d0e48ee9 as hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740/ns/2ee9c9201b7240a98431a9c0d0e48ee9 2024-11-18T02:30:05,638 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740/ns/2ee9c9201b7240a98431a9c0d0e48ee9, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T02:30:05,640 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-11-18T02:30:05,640 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T02:30:05,645 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T02:30:05,646 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:30:05,646 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:05,646 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897005597Running coprocessor pre-close hooks at 1731897005597Disabling compacts and flushes for region at 1731897005597Disabling writes for close at 1731897005598 (+1 ms)Obtaining lock to block concurrent updates at 1731897005598Preparing flush snapshotting stores in 1588230740 at 1731897005598Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731897005598Flushing stores of hbase:meta,,1.1588230740 at 1731897005599 (+1 ms)Flushing 1588230740/ns: creating writer at 1731897005599Flushing 1588230740/ns: appending metadata at 1731897005616 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731897005616Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3710c586: reopening flushed file at 1731897005630 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1731897005640 (+10 ms)Writing region close event to WAL at 1731897005641 (+1 ms)Running coprocessor post-close hooks at 1731897005646 (+5 ms)Closed at 1731897005646 2024-11-18T02:30:05,646 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:05,798 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(976): stopping server c4730a2bacf8,41993,1731897004534; all regions closed. 
2024-11-18T02:30:05,798 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,799 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,799 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,799 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,799 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741834_1010 (size=1152) 2024-11-18T02:30:05,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741834_1010 (size=1152) 2024-11-18T02:30:05,805 DEBUG [RS:0;c4730a2bacf8:41993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/oldWALs 2024-11-18T02:30:05,805 INFO [RS:0;c4730a2bacf8:41993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C41993%2C1731897004534.meta:.meta(num 1731897005413) 2024-11-18T02:30:05,805 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,805 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,805 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741833_1009 (size=93) 2024-11-18T02:30:05,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741833_1009 (size=93) 2024-11-18T02:30:05,810 DEBUG [RS:0;c4730a2bacf8:41993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/oldWALs 2024-11-18T02:30:05,810 INFO [RS:0;c4730a2bacf8:41993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C41993%2C1731897004534:(num 1731897005060) 2024-11-18T02:30:05,810 DEBUG [RS:0;c4730a2bacf8:41993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:05,810 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:30:05,810 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:30:05,810 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.ChoreService(370): Chore service for: regionserver/c4730a2bacf8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T02:30:05,811 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:30:05,811 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T02:30:05,811 INFO [RS:0;c4730a2bacf8:41993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41993 2024-11-18T02:30:05,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:30:05,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c4730a2bacf8,41993,1731897004534 2024-11-18T02:30:05,813 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:30:05,815 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c4730a2bacf8,41993,1731897004534] 2024-11-18T02:30:05,817 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c4730a2bacf8,41993,1731897004534 already deleted, retry=false 2024-11-18T02:30:05,817 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c4730a2bacf8,41993,1731897004534 expired; onlineServers=0 2024-11-18T02:30:05,817 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c4730a2bacf8,38951,1731897004459' ***** 2024-11-18T02:30:05,817 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T02:30:05,817 INFO [M:0;c4730a2bacf8:38951 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:30:05,817 INFO [M:0;c4730a2bacf8:38951 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:30:05,817 DEBUG [M:0;c4730a2bacf8:38951 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T02:30:05,817 DEBUG [M:0;c4730a2bacf8:38951 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T02:30:05,817 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T02:30:05,817 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897004782 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897004782,5,FailOnTimeoutGroup] 2024-11-18T02:30:05,818 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897004783 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897004783,5,FailOnTimeoutGroup] 2024-11-18T02:30:05,818 INFO [M:0;c4730a2bacf8:38951 {}] hbase.ChoreService(370): Chore service for: master/c4730a2bacf8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T02:30:05,818 INFO [M:0;c4730a2bacf8:38951 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:30:05,818 DEBUG [M:0;c4730a2bacf8:38951 {}] master.HMaster(1795): Stopping service threads 2024-11-18T02:30:05,818 INFO [M:0;c4730a2bacf8:38951 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T02:30:05,818 INFO [M:0;c4730a2bacf8:38951 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:30:05,818 INFO [M:0;c4730a2bacf8:38951 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T02:30:05,818 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T02:30:05,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T02:30:05,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:05,820 DEBUG [M:0;c4730a2bacf8:38951 {}] zookeeper.ZKUtil(347): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T02:30:05,820 WARN [M:0;c4730a2bacf8:38951 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T02:30:05,821 INFO [M:0;c4730a2bacf8:38951 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/.lastflushedseqids 2024-11-18T02:30:05,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741836_1012 (size=99) 2024-11-18T02:30:05,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741836_1012 (size=99) 2024-11-18T02:30:05,828 INFO [M:0;c4730a2bacf8:38951 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T02:30:05,828 INFO [M:0;c4730a2bacf8:38951 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T02:30:05,828 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:30:05,828 INFO [M:0;c4730a2bacf8:38951 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:05,828 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:05,828 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:30:05,828 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:05,828 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T02:30:05,846 DEBUG [M:0;c4730a2bacf8:38951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8246559b70044a4eb1d04b0b957c98bb is 82, key is hbase:meta,,1/info:regioninfo/1731897005448/Put/seqid=0 2024-11-18T02:30:05,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741837_1013 (size=5672) 2024-11-18T02:30:05,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741837_1013 (size=5672) 2024-11-18T02:30:05,853 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8246559b70044a4eb1d04b0b957c98bb 2024-11-18T02:30:05,884 DEBUG [M:0;c4730a2bacf8:38951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77ba9ba1b8074cdeb28ba4a9fdf65265 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731897005472/Put/seqid=0 2024-11-18T02:30:05,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741838_1014 (size=5275) 2024-11-18T02:30:05,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741838_1014 (size=5275) 2024-11-18T02:30:05,890 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77ba9ba1b8074cdeb28ba4a9fdf65265 2024-11-18T02:30:05,912 DEBUG [M:0;c4730a2bacf8:38951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca5b5259d2c440b29f157637472fa8b4 is 69, key is c4730a2bacf8,41993,1731897004534/rs:state/1731897004895/Put/seqid=0 2024-11-18T02:30:05,915 INFO [RS:0;c4730a2bacf8:41993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:30:05,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:05,915 INFO [RS:0;c4730a2bacf8:41993 {}] regionserver.HRegionServer(1031): Exiting; stopping=c4730a2bacf8,41993,1731897004534; zookeeper connection closed. 2024-11-18T02:30:05,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41993-0x10128e8fc250001, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:05,916 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7cf21c7f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7cf21c7f 2024-11-18T02:30:05,916 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T02:30:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741839_1015 (size=5156) 2024-11-18T02:30:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741839_1015 (size=5156) 2024-11-18T02:30:05,919 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca5b5259d2c440b29f157637472fa8b4 2024-11-18T02:30:05,940 DEBUG [M:0;c4730a2bacf8:38951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4960e2c825b54392abb406d573bbee52 is 52, key is load_balancer_on/state:d/1731897005588/Put/seqid=0 2024-11-18T02:30:05,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741840_1016 (size=5056) 2024-11-18T02:30:05,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741840_1016 (size=5056) 2024-11-18T02:30:05,946 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4960e2c825b54392abb406d573bbee52 2024-11-18T02:30:05,953 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8246559b70044a4eb1d04b0b957c98bb as hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8246559b70044a4eb1d04b0b957c98bb 2024-11-18T02:30:05,960 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8246559b70044a4eb1d04b0b957c98bb, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T02:30:05,961 DEBUG [M:0;c4730a2bacf8:38951 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77ba9ba1b8074cdeb28ba4a9fdf65265 as hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/77ba9ba1b8074cdeb28ba4a9fdf65265 2024-11-18T02:30:05,967 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/77ba9ba1b8074cdeb28ba4a9fdf65265, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T02:30:05,969 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ca5b5259d2c440b29f157637472fa8b4 as hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ca5b5259d2c440b29f157637472fa8b4 2024-11-18T02:30:05,975 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ca5b5259d2c440b29f157637472fa8b4, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T02:30:05,976 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4960e2c825b54392abb406d573bbee52 as hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4960e2c825b54392abb406d573bbee52 2024-11-18T02:30:05,981 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33521/user/jenkins/test-data/04141ce4-16f0-329c-3705-b98421eec076/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4960e2c825b54392abb406d573bbee52, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T02:30:05,982 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=29, compaction requested=false 2024-11-18T02:30:05,984 INFO [M:0;c4730a2bacf8:38951 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:05,984 DEBUG [M:0;c4730a2bacf8:38951 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897005828Disabling compacts and flushes for region at 1731897005828Disabling writes for close at 1731897005828Obtaining lock to block concurrent updates at 1731897005828Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731897005828Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731897005829 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731897005829Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731897005830 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731897005846 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731897005846Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731897005860 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731897005883 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731897005883Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731897005896 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731897005912 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731897005912Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731897005925 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731897005940 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731897005940Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46005eb9: reopening flushed file at 1731897005952 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18cbdbff: reopening flushed file at 1731897005960 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bbe59cc: reopening flushed file at 1731897005967 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47e5ac18: reopening flushed file at 1731897005975 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=29, compaction requested=false at 1731897005982 (+7 ms)Writing region close event to WAL at 1731897005984 (+2 ms)Closed at 1731897005984 2024-11-18T02:30:05,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,985 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,985 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,985 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,985 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:05,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39023 is added to blk_1073741830_1006 (size=10311) 2024-11-18T02:30:05,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36721 is added to blk_1073741830_1006 (size=10311) 2024-11-18T02:30:05,989 INFO [M:0;c4730a2bacf8:38951 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T02:30:05,989 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T02:30:05,989 INFO [M:0;c4730a2bacf8:38951 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38951 2024-11-18T02:30:05,989 INFO [M:0;c4730a2bacf8:38951 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:30:06,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:06,091 INFO [M:0;c4730a2bacf8:38951 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:30:06,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38951-0x10128e8fc250000, quorum=127.0.0.1:51966, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:06,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6951519{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:06,094 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ce533a5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:06,094 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:06,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11d14b2b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:06,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b67c504{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:06,096 WARN [BP-342457722-172.17.0.2-1731897003617 heartbeating to localhost/127.0.0.1:33521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:06,096 WARN [BP-342457722-172.17.0.2-1731897003617 heartbeating to localhost/127.0.0.1:33521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-342457722-172.17.0.2-1731897003617 (Datanode Uuid 4754971d-0a12-48fb-b4b1-cd9770793f11) service to localhost/127.0.0.1:33521 2024-11-18T02:30:06,096 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:30:06,096 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:06,097 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/data/data3/current/BP-342457722-172.17.0.2-1731897003617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:06,097 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/data/data4/current/BP-342457722-172.17.0.2-1731897003617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:06,097 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:06,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27105e7c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:06,102 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e23c0c8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:06,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:06,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9809f98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:06,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74fa283c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:06,103 WARN [BP-342457722-172.17.0.2-1731897003617 heartbeating to localhost/127.0.0.1:33521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:06,103 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:30:06,104 WARN [BP-342457722-172.17.0.2-1731897003617 heartbeating to localhost/127.0.0.1:33521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-342457722-172.17.0.2-1731897003617 (Datanode Uuid a509a722-4b12-4237-bdf9-a6495eb9766b) service to localhost/127.0.0.1:33521 2024-11-18T02:30:06,104 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:06,104 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/data/data1/current/BP-342457722-172.17.0.2-1731897003617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:06,104 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/cluster_fe63646d-44f4-3299-5068-0aafce14b757/data/data2/current/BP-342457722-172.17.0.2-1731897003617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:06,105 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:06,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ba7fb41{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:30:06,112 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f841e9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:06,112 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:06,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c69b616{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:06,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@687b21ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:06,119 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T02:30:06,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T02:30:06,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T02:30:06,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.log.dir so I do NOT create it in target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde 2024-11-18T02:30:06,140 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/adda3a54-b282-40dc-b144-5feeb7f3f776/hadoop.tmp.dir so I do NOT create it in target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde 2024-11-18T02:30:06,140 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c, deleteOnExit=true 2024-11-18T02:30:06,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T02:30:06,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/test.cache.data in system properties and HBase conf 2024-11-18T02:30:06,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T02:30:06,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir in system properties and HBase conf 2024-11-18T02:30:06,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T02:30:06,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T02:30:06,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T02:30:06,141 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T02:30:06,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/nfs.dump.dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/java.io.tmpdir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:30:06,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T02:30:06,143 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T02:30:06,157 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:30:06,232 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:06,240 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:06,248 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:06,248 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:06,248 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:30:06,249 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:06,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5493a194{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:06,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67cf8368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:06,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10fae299{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/java.io.tmpdir/jetty-localhost-46855-hadoop-hdfs-3_4_1-tests_jar-_-any-3913066946339787025/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:30:06,370 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ab5393f{HTTP/1.1, (http/1.1)}{localhost:46855} 2024-11-18T02:30:06,370 INFO [Time-limited test {}] server.Server(415): Started @104826ms 2024-11-18T02:30:06,385 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:30:06,456 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:06,460 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:06,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:06,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:06,460 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:30:06,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72bec19e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:06,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e195dbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:06,579 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@805be33{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/java.io.tmpdir/jetty-localhost-44261-hadoop-hdfs-3_4_1-tests_jar-_-any-14654843992753746690/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:06,579 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@372d60ec{HTTP/1.1, (http/1.1)}{localhost:44261} 2024-11-18T02:30:06,579 INFO [Time-limited test {}] server.Server(415): Started @105035ms 2024-11-18T02:30:06,581 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:30:06,617 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:06,621 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:06,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:06,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:06,621 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:30:06,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35026af9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:06,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63c2c4fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:06,699 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data1/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:06,699 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data2/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:06,726 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:30:06,730 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x171ed95308dd1ed5 with lease ID 0x62472321fbf7b327: Processing first storage report for DS-06189253-4a72-450b-8da7-8e486ea0adb9 from datanode DatanodeRegistration(127.0.0.1:44805, datanodeUuid=9c65b774-e3f5-465f-ad31-04e2403fc80d, infoPort=42453, infoSecurePort=0, ipcPort=33871, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:06,730 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x171ed95308dd1ed5 with lease ID 0x62472321fbf7b327: from storage DS-06189253-4a72-450b-8da7-8e486ea0adb9 node DatanodeRegistration(127.0.0.1:44805, datanodeUuid=9c65b774-e3f5-465f-ad31-04e2403fc80d, infoPort=42453, infoSecurePort=0, ipcPort=33871, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T02:30:06,730 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x171ed95308dd1ed5 with lease ID 0x62472321fbf7b327: Processing first storage report for DS-fb835548-6235-4d88-b652-9252428da8d7 from datanode DatanodeRegistration(127.0.0.1:44805, datanodeUuid=9c65b774-e3f5-465f-ad31-04e2403fc80d, infoPort=42453, infoSecurePort=0, ipcPort=33871, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:06,730 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x171ed95308dd1ed5 with lease ID 0x62472321fbf7b327: from storage DS-fb835548-6235-4d88-b652-9252428da8d7 node DatanodeRegistration(127.0.0.1:44805, datanodeUuid=9c65b774-e3f5-465f-ad31-04e2403fc80d, infoPort=42453, infoSecurePort=0, ipcPort=33871, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:06,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@461a42e8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/java.io.tmpdir/jetty-localhost-42513-hadoop-hdfs-3_4_1-tests_jar-_-any-8490585758463649587/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:06,743 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@217a95d6{HTTP/1.1, (http/1.1)}{localhost:42513} 2024-11-18T02:30:06,743 INFO [Time-limited test {}] server.Server(415): Started @105199ms 2024-11-18T02:30:06,745 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
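The startup sequence above (the StartMiniClusterOption dump at 02:30:06,140, the per-property HBaseTestingUtil(751) lines, the Jetty namenode/datanode endpoints, and the first block reports) is what HBaseTestingUtil emits when a test brings up a fresh mini cluster. A minimal sketch of how such a cluster is typically started and stopped follows; the option values mirror what the log prints, but the driver class itself is hypothetical and is not the test that produced this output.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirrors the option printed in the log: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);   // produces the DFS/Jetty/ZooKeeper startup lines seen above
        try {
          // ... exercise the cluster via util.getConnection() here ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" line
        }
      }
    }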
2024-11-18T02:30:06,850 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data4/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:06,850 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data3/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:06,869 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:30:06,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e667730a34e1d87 with lease ID 0x62472321fbf7b328: Processing first storage report for DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8 from datanode DatanodeRegistration(127.0.0.1:40223, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=38707, infoSecurePort=0, ipcPort=46279, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:06,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e667730a34e1d87 with lease ID 0x62472321fbf7b328: from storage DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8 node DatanodeRegistration(127.0.0.1:40223, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=38707, infoSecurePort=0, ipcPort=46279, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:06,873 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e667730a34e1d87 with lease ID 0x62472321fbf7b328: Processing first storage report for DS-61c17e9d-d1fb-45a8-aa47-87b975fa7f2c from datanode DatanodeRegistration(127.0.0.1:40223, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=38707, infoSecurePort=0, ipcPort=46279, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:06,873 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e667730a34e1d87 with lease ID 0x62472321fbf7b328: from storage DS-61c17e9d-d1fb-45a8-aa47-87b975fa7f2c node DatanodeRegistration(127.0.0.1:40223, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=38707, infoSecurePort=0, ipcPort=46279, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:06,874 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde 2024-11-18T02:30:06,880 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/zookeeper_0, clientPort=55099, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T02:30:06,881 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55099 2024-11-18T02:30:06,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:06,883 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:06,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:30:06,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:30:06,895 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29 with version=8 2024-11-18T02:30:06,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase-staging 2024-11-18T02:30:06,897 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:30:06,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:06,898 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:06,898 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:30:06,898 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:06,898 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:30:06,898 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T02:30:06,898 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:30:06,899 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36945 2024-11-18T02:30:06,901 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36945 connecting to ZooKeeper ensemble=127.0.0.1:55099 2024-11-18T02:30:06,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:369450x0, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:30:06,916 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36945-0x10128e905af0000 connected 2024-11-18T02:30:06,920 INFO [regionserver/c4730a2bacf8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:30:06,942 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:06,944 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:06,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:06,948 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29, hbase.cluster.distributed=false 2024-11-18T02:30:06,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:30:06,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36945 2024-11-18T02:30:06,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36945 2024-11-18T02:30:06,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36945 2024-11-18T02:30:06,955 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36945 2024-11-18T02:30:06,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36945 2024-11-18T02:30:06,974 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:30:06,974 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:06,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:06,975 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:30:06,975 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:06,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:30:06,975 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T02:30:06,975 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:30:06,977 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42723 2024-11-18T02:30:06,978 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42723 connecting to ZooKeeper ensemble=127.0.0.1:55099 2024-11-18T02:30:06,979 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:06,983 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:06,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427230x0, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:30:06,997 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:427230x0, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:06,997 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T02:30:07,000 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42723-0x10128e905af0001 connected 2024-11-18T02:30:07,005 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T02:30:07,006 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T02:30:07,008 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:30:07,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42723 2024-11-18T02:30:07,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42723 2024-11-18T02:30:07,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42723 2024-11-18T02:30:07,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42723 2024-11-18T02:30:07,021 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42723 2024-11-18T02:30:07,035 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c4730a2bacf8:36945 2024-11-18T02:30:07,036 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:07,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:07,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:07,040 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:07,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T02:30:07,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,046 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T02:30:07,046 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c4730a2bacf8,36945,1731897006897 from backup master directory 2024-11-18T02:30:07,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:07,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:07,050 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
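The ZKUtil(113) and ZKWatcher(609) lines above show the pattern the master and region server rely on: a watch is registered on a znode such as /hbase/running or /hbase/master before it exists, and ZooKeeper later delivers a NodeCreated event when the active master creates it. The fragment below is a generic illustration of that mechanism using the plain Apache ZooKeeper client rather than HBase's ZKUtil/ZKWatcher wrappers; the connect string reuses the mini ZK port from the log, and everything else is a placeholder.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch created = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55099", 30000, event -> { });

        // exists() registers the watch even when the znode is absent; a later create
        // on the same path triggers a NodeCreated event, like the events logged above.
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            created.countDown();
          }
        };
        Stat stat = zk.exists("/hbase/running", watcher);
        if (stat == null) {
          created.await();   // block until some process creates /hbase/running
        }
        zk.close();
      }
    }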
2024-11-18T02:30:07,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:07,050 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:07,056 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/hbase.id] with ID: edf7910d-f83d-4c70-ae50-df5df96ea6ac 2024-11-18T02:30:07,056 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/.tmp/hbase.id 2024-11-18T02:30:07,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:30:07,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:30:07,071 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/.tmp/hbase.id]:[hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/hbase.id] 2024-11-18T02:30:07,088 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:07,088 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T02:30:07,090 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
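The FSUtils(620)/(625)/(634) lines above describe a write-to-temporary-then-rename publication of the cluster ID file: the ID is written under .tmp first and only then moved to hbase.id, so a reader never observes a half-written file. A small sketch of that pattern with the stock Hadoop FileSystem API follows; the root directory name is a placeholder, while the namenode address and the UUID value are taken from the log.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path rootDir = new Path("hdfs://localhost:43001/user/jenkins/example-root");  // placeholder root
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        FileSystem fs = rootDir.getFileSystem(conf);

        // 1) Write the content to a temporary location first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("edf7910d-f83d-4c70-ae50-df5df96ea6ac".getBytes(StandardCharsets.UTF_8));
        }

        // 2) ... then rename it into its final location so the file appears atomically.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename " + tmp + " -> " + target + " failed");
        }
      }
    }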
2024-11-18T02:30:07,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:30:07,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:30:07,111 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:30:07,113 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T02:30:07,113 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:07,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:30:07,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:30:07,128 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store 2024-11-18T02:30:07,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:30:07,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:30:07,141 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:07,141 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:30:07,141 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:07,141 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:07,141 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:30:07,141 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:07,141 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
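The MasterRegion(370) and HRegion(7590) lines above dump the master:store descriptor with four column families, where 'info' uses VERSIONS 3, IN_MEMORY true, ROW_INDEX_V1 encoding, a ROWCOL bloom filter and 8 KB blocks, while 'proc', 'rs' and 'state' keep single-version defaults with 64 KB blocks. The sketch below shows how the same attributes look when expressed through the public descriptor builders; the table name is hypothetical and this is not the internal code path the master uses to build its local store.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                      // VERSIONS => '3'
                .setInMemory(true)                                      // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)   // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                   // BLOOMFILTER => 'ROWCOL'
                .setBlocksize(8 * 1024)                                 // BLOCKSIZE => 8 KB
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setBloomFilterType(BloomType.ROW)                      // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                                // BLOCKSIZE => 64 KB
                .build())
            .build();

        System.out.println(desc);  // prints an attribute dump similar in shape to the log line above
      }
    }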
2024-11-18T02:30:07,141 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897007141Disabling compacts and flushes for region at 1731897007141Disabling writes for close at 1731897007141Writing region close event to WAL at 1731897007141Closed at 1731897007141 2024-11-18T02:30:07,143 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/.initializing 2024-11-18T02:30:07,143 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:07,146 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C36945%2C1731897006897, suffix=, logDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897, archiveDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/oldWALs, maxLogs=10 2024-11-18T02:30:07,146 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C36945%2C1731897006897.1731897007146 2024-11-18T02:30:07,154 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 2024-11-18T02:30:07,160 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38707:38707),(127.0.0.1/127.0.0.1:42453:42453)] 2024-11-18T02:30:07,161 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:30:07,162 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:07,162 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,162 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T02:30:07,166 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:07,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T02:30:07,168 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:07,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T02:30:07,171 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:07,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T02:30:07,173 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:07,174 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,175 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,175 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,177 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,177 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,178 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T02:30:07,179 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:07,182 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:30:07,183 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758334, jitterRate=-0.03572899103164673}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T02:30:07,184 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731897007162Initializing all the Stores at 1731897007163 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897007163Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897007163Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897007164 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897007164Cleaning up temporary data from old regions at 1731897007177 (+13 ms)Region opened successfully at 1731897007184 (+7 ms) 2024-11-18T02:30:07,188 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T02:30:07,192 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@135dbc22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:30:07,193 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T02:30:07,193 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T02:30:07,193 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T02:30:07,193 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T02:30:07,196 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 2 msec 2024-11-18T02:30:07,196 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T02:30:07,197 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T02:30:07,208 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T02:30:07,209 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T02:30:07,211 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T02:30:07,211 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T02:30:07,212 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T02:30:07,214 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T02:30:07,214 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T02:30:07,217 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T02:30:07,220 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T02:30:07,221 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T02:30:07,222 DEBUG 
[master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T02:30:07,225 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T02:30:07,226 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T02:30:07,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:07,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:07,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,233 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c4730a2bacf8,36945,1731897006897, sessionid=0x10128e905af0000, setting cluster-up flag (Was=false) 2024-11-18T02:30:07,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,246 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T02:30:07,248 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:07,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,261 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T02:30:07,262 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:07,264 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T02:30:07,267 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:07,267 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T02:30:07,267 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T02:30:07,267 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c4730a2bacf8,36945,1731897006897 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T02:30:07,269 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:07,269 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:07,269 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:07,269 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:07,270 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c4730a2bacf8:0, corePoolSize=10, maxPoolSize=10 2024-11-18T02:30:07,270 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,270 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:30:07,270 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T02:30:07,278 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:07,278 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T02:30:07,280 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,280 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T02:30:07,285 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731897037285 2024-11-18T02:30:07,285 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T02:30:07,285 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T02:30:07,285 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T02:30:07,285 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T02:30:07,285 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T02:30:07,286 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T02:30:07,286 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,286 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T02:30:07,286 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T02:30:07,286 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T02:30:07,288 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T02:30:07,288 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T02:30:07,291 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897007288,5,FailOnTimeoutGroup] 2024-11-18T02:30:07,291 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897007291,5,FailOnTimeoutGroup] 2024-11-18T02:30:07,291 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,291 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T02:30:07,291 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,292 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T02:30:07,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:30:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:30:07,297 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T02:30:07,297 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29 2024-11-18T02:30:07,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:30:07,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:30:07,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:07,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:30:07,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:30:07,322 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:07,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:30:07,325 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:30:07,325 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:07,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:30:07,326 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(746): ClusterId : edf7910d-f83d-4c70-ae50-df5df96ea6ac 2024-11-18T02:30:07,326 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T02:30:07,327 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:30:07,327 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:07,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:30:07,330 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T02:30:07,331 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T02:30:07,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:30:07,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:07,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:30:07,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740 2024-11-18T02:30:07,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740 2024-11-18T02:30:07,333 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T02:30:07,334 DEBUG [RS:0;c4730a2bacf8:42723 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b567beb, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:30:07,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:30:07,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:30:07,335 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T02:30:07,345 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:30:07,350 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:30:07,350 DEBUG [RS:0;c4730a2bacf8:42723 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c4730a2bacf8:42723 2024-11-18T02:30:07,350 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884459, jitterRate=0.12464894354343414}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:30:07,351 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T02:30:07,351 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T02:30:07,351 DEBUG [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T02:30:07,352 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731897007319Initializing all the Stores at 1731897007320 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897007320Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897007320Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897007321 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897007321Cleaning up temporary data from old regions at 1731897007335 (+14 ms)Region opened successfully at 1731897007352 (+17 ms) 2024-11-18T02:30:07,352 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:30:07,352 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:30:07,352 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(2659): reportForDuty to master=c4730a2bacf8,36945,1731897006897 with port=42723, startcode=1731897006974 2024-11-18T02:30:07,352 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:30:07,352 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:30:07,352 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:30:07,352 DEBUG [RS:0;c4730a2bacf8:42723 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T02:30:07,354 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:07,354 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897007352Disabling compacts and flushes for region at 1731897007352Disabling writes for close at 1731897007352Writing region close event to WAL at 1731897007354 (+2 ms)Closed at 1731897007354 2024-11-18T02:30:07,356 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:07,356 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): 
Going to assign meta 2024-11-18T02:30:07,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T02:30:07,357 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:30:07,359 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T02:30:07,365 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37727, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T02:30:07,365 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36945 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,365 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36945 {}] master.ServerManager(517): Registering regionserver=c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,367 DEBUG [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29 2024-11-18T02:30:07,367 DEBUG [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43001 2024-11-18T02:30:07,367 DEBUG [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T02:30:07,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:30:07,372 DEBUG [RS:0;c4730a2bacf8:42723 {}] zookeeper.ZKUtil(111): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,372 WARN [RS:0;c4730a2bacf8:42723 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T02:30:07,372 INFO [RS:0;c4730a2bacf8:42723 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:07,372 DEBUG [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,372 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c4730a2bacf8,42723,1731897006974] 2024-11-18T02:30:07,375 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T02:30:07,378 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T02:30:07,380 INFO [RS:0;c4730a2bacf8:42723 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:30:07,380 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,380 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T02:30:07,382 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T02:30:07,382 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c4730a2bacf8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,382 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,383 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:07,383 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:30:07,383 DEBUG [RS:0;c4730a2bacf8:42723 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:30:07,385 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,386 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,386 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,386 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,386 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,386 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,42723,1731897006974-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:30:07,411 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T02:30:07,411 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,42723,1731897006974-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,411 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:07,411 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.Replication(171): c4730a2bacf8,42723,1731897006974 started 2024-11-18T02:30:07,430 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T02:30:07,430 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1482): Serving as c4730a2bacf8,42723,1731897006974, RpcServer on c4730a2bacf8/172.17.0.2:42723, sessionid=0x10128e905af0001 2024-11-18T02:30:07,430 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T02:30:07,430 DEBUG [RS:0;c4730a2bacf8:42723 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,430 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,42723,1731897006974' 2024-11-18T02:30:07,430 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T02:30:07,431 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T02:30:07,432 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T02:30:07,432 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T02:30:07,432 DEBUG [RS:0;c4730a2bacf8:42723 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,432 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,42723,1731897006974' 2024-11-18T02:30:07,432 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T02:30:07,432 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T02:30:07,433 DEBUG [RS:0;c4730a2bacf8:42723 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T02:30:07,433 INFO [RS:0;c4730a2bacf8:42723 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T02:30:07,433 INFO [RS:0;c4730a2bacf8:42723 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T02:30:07,509 WARN [c4730a2bacf8:36945 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-18T02:30:07,536 INFO [RS:0;c4730a2bacf8:42723 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C42723%2C1731897006974, suffix=, logDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974, archiveDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs, maxLogs=32 2024-11-18T02:30:07,538 INFO [RS:0;c4730a2bacf8:42723 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C42723%2C1731897006974.1731897007537 2024-11-18T02:30:07,554 INFO [RS:0;c4730a2bacf8:42723 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 2024-11-18T02:30:07,593 DEBUG [RS:0;c4730a2bacf8:42723 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38707:38707),(127.0.0.1/127.0.0.1:42453:42453)] 2024-11-18T02:30:07,759 DEBUG [c4730a2bacf8:36945 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T02:30:07,760 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,762 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,42723,1731897006974, state=OPENING 2024-11-18T02:30:07,764 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T02:30:07,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:07,766 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:30:07,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,42723,1731897006974}] 2024-11-18T02:30:07,767 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:07,767 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:07,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:30:07,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:30:07,771 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T02:30:07,923 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T02:30:07,926 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42303, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T02:30:07,930 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T02:30:07,930 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:07,933 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C42723%2C1731897006974.meta, suffix=.meta, logDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974, archiveDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs, maxLogs=32 2024-11-18T02:30:07,934 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta 2024-11-18T02:30:07,940 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta 2024-11-18T02:30:07,943 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42453:42453),(127.0.0.1/127.0.0.1:38707:38707)] 2024-11-18T02:30:07,944 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:30:07,945 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T02:30:07,945 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T02:30:07,945 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T02:30:07,945 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T02:30:07,945 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:07,945 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T02:30:07,945 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T02:30:07,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:30:07,948 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:30:07,948 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:07,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:30:07,950 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:30:07,950 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:07,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:30:07,952 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:30:07,952 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:07,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:30:07,953 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:30:07,953 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:07,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T02:30:07,954 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:30:07,955 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740 2024-11-18T02:30:07,956 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740 2024-11-18T02:30:07,958 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:30:07,958 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:30:07,959 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T02:30:07,960 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:30:07,961 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690659, jitterRate=-0.12178273499011993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:30:07,962 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T02:30:07,962 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731897007945Writing region info on filesystem at 1731897007945Initializing all the Stores at 1731897007946 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897007946Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897007947 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897007947Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897007947Cleaning up temporary data from old regions at 1731897007958 (+11 ms)Running coprocessor post-open hooks at 1731897007962 (+4 ms)Region opened successfully at 1731897007962 2024-11-18T02:30:07,964 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731897007923 2024-11-18T02:30:07,968 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T02:30:07,968 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T02:30:07,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,970 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,42723,1731897006974, state=OPEN 2024-11-18T02:30:07,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:30:07,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:30:07,978 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:07,978 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:07,978 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:07,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T02:30:07,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,42723,1731897006974 in 211 msec 2024-11-18T02:30:07,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T02:30:07,986 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 626 msec 2024-11-18T02:30:07,987 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:07,987 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T02:30:07,988 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:30:07,988 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,42723,1731897006974, seqNum=-1] 2024-11-18T02:30:07,989 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:30:07,990 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60413, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:30:07,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 730 msec 2024-11-18T02:30:07,998 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731897007998, completionTime=-1 2024-11-18T02:30:07,998 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T02:30:07,998 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731897068001 2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731897128001 2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,36945,1731897006897-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,36945,1731897006897-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,36945,1731897006897-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c4730a2bacf8:36945, period=300000, unit=MILLISECONDS is enabled. 
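The ChoreService lines above register periodic background tasks (cluster status, balancer, region normalizer, catalog janitor) with fixed periods in milliseconds. A rough JDK analogy, not the HBase ChoreService API itself, is a scheduled executor re-running a task at the same period; the 300000 ms figure is the BalancerChore period from the log and the task body is invented for the sketch:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Each chore amounts to a task re-run at a fixed period, here every 300000 ms.
        pool.scheduleAtFixedRate(
            () -> System.out.println("balancer chore tick"),
            0, 300_000, TimeUnit.MILLISECONDS);
      }
    }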
2024-11-18T02:30:08,001 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,002 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,003 DEBUG [master/c4730a2bacf8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T02:30:08,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.955sec 2024-11-18T02:30:08,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T02:30:08,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T02:30:08,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T02:30:08,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T02:30:08,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T02:30:08,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,36945,1731897006897-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:30:08,006 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,36945,1731897006897-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T02:30:08,008 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T02:30:08,008 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T02:30:08,008 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,36945,1731897006897-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:08,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:08,026 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6333ed10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:08,026 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c4730a2bacf8,36945,-1 for getting cluster id 2024-11-18T02:30:08,026 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T02:30:08,029 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'edf7910d-f83d-4c70-ae50-df5df96ea6ac' 2024-11-18T02:30:08,029 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T02:30:08,029 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "edf7910d-f83d-4c70-ae50-df5df96ea6ac" 2024-11-18T02:30:08,030 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ed00ef8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:08,030 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c4730a2bacf8,36945,-1] 2024-11-18T02:30:08,030 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T02:30:08,032 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:08,034 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51036, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T02:30:08,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e800148, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:08,036 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:30:08,037 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,42723,1731897006974, seqNum=-1] 2024-11-18T02:30:08,038 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:30:08,039 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60462, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:30:08,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:08,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:08,045 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T02:30:08,067 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:30:08,067 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:08,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:08,068 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:30:08,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:08,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:30:08,068 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T02:30:08,068 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:30:08,069 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33905 2024-11-18T02:30:08,070 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33905 connecting to ZooKeeper ensemble=127.0.0.1:55099 2024-11-18T02:30:08,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:08,072 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:08,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339050x0, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:30:08,077 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:339050x0, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-18T02:30:08,078 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-18T02:30:08,081 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T02:30:08,091 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33905-0x10128e905af0002 connected 2024-11-18T02:30:08,092 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, 
evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T02:30:08,093 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T02:30:08,094 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:30:08,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33905 2024-11-18T02:30:08,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33905 2024-11-18T02:30:08,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33905 2024-11-18T02:30:08,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33905 2024-11-18T02:30:08,109 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33905 2024-11-18T02:30:08,110 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(746): ClusterId : edf7910d-f83d-4c70-ae50-df5df96ea6ac 2024-11-18T02:30:08,110 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T02:30:08,114 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T02:30:08,114 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T02:30:08,117 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T02:30:08,117 DEBUG [RS:1;c4730a2bacf8:33905 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8e15db8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:30:08,132 DEBUG [RS:1;c4730a2bacf8:33905 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c4730a2bacf8:33905 2024-11-18T02:30:08,132 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T02:30:08,132 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T02:30:08,132 DEBUG [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(832): About to register with Master. 
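The second region server's startup above reports three handlers per default RPC queue and an 880 MB block cache. A sketch of the well-known configuration keys that usually sit behind those numbers follows; treating them as values this particular test sets explicitly is an assumption, and 0.4 is only an example heap fraction, since the log reports the resulting absolute cache size rather than the fraction:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcAndCacheConfSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Matches handlerCount=3 in the RpcExecutor entries above.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Fraction of heap given to the block cache; the 880 MB in the log is the
        // resulting size on this JVM, not a value read directly from the log.
        conf.setFloat("hfile.block.cache.size", 0.4f);
        return conf;
      }
    }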
2024-11-18T02:30:08,133 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(2659): reportForDuty to master=c4730a2bacf8,36945,1731897006897 with port=33905, startcode=1731897008067 2024-11-18T02:30:08,133 DEBUG [RS:1;c4730a2bacf8:33905 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T02:30:08,136 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39011, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T02:30:08,136 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36945 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c4730a2bacf8,33905,1731897008067 2024-11-18T02:30:08,136 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36945 {}] master.ServerManager(517): Registering regionserver=c4730a2bacf8,33905,1731897008067 2024-11-18T02:30:08,138 DEBUG [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29 2024-11-18T02:30:08,138 DEBUG [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43001 2024-11-18T02:30:08,138 DEBUG [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T02:30:08,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:30:08,142 DEBUG [RS:1;c4730a2bacf8:33905 {}] zookeeper.ZKUtil(111): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c4730a2bacf8,33905,1731897008067 2024-11-18T02:30:08,142 WARN [RS:1;c4730a2bacf8:33905 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:30:08,142 INFO [RS:1;c4730a2bacf8:33905 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:08,142 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c4730a2bacf8,33905,1731897008067] 2024-11-18T02:30:08,142 DEBUG [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067 2024-11-18T02:30:08,147 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T02:30:08,149 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T02:30:08,149 INFO [RS:1;c4730a2bacf8:33905 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:30:08,149 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T02:30:08,152 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T02:30:08,154 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T02:30:08,154 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,154 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,154 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,154 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,154 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,154 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,154 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:30:08,154 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,155 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,155 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,155 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,155 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,155 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:08,155 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:30:08,155 DEBUG [RS:1;c4730a2bacf8:33905 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:30:08,159 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
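The executor-service entries above list per-purpose thread pools with matching core and maximum sizes (for example RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1). Purely as a JDK analogy for what those numbers mean, not the HBase ExecutorService class itself:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorAnalogy {
      // A single worker thread draining an unbounded queue of tasks, which is roughly
      // what "corePoolSize=1, maxPoolSize=1" amounts to for the region-open executor.
      public static ThreadPoolExecutor openRegionPool() {
        return new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
      }
    }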
2024-11-18T02:30:08,159 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,159 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,159 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,159 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,159 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33905,1731897008067-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:30:08,179 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T02:30:08,179 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33905,1731897008067-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,179 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,179 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.Replication(171): c4730a2bacf8,33905,1731897008067 started 2024-11-18T02:30:08,196 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:08,196 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(1482): Serving as c4730a2bacf8,33905,1731897008067, RpcServer on c4730a2bacf8/172.17.0.2:33905, sessionid=0x10128e905af0002 2024-11-18T02:30:08,196 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T02:30:08,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;c4730a2bacf8:33905,5,FailOnTimeoutGroup] 2024-11-18T02:30:08,196 DEBUG [RS:1;c4730a2bacf8:33905 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c4730a2bacf8,33905,1731897008067 2024-11-18T02:30:08,196 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,33905,1731897008067' 2024-11-18T02:30:08,196 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T02:30:08,197 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-18T02:30:08,197 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T02:30:08,197 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T02:30:08,198 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T02:30:08,198 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T02:30:08,198 DEBUG [RS:1;c4730a2bacf8:33905 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
c4730a2bacf8,33905,1731897008067 2024-11-18T02:30:08,198 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,33905,1731897008067' 2024-11-18T02:30:08,198 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T02:30:08,198 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:08,199 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4606f62b 2024-11-18T02:30:08,199 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T02:30:08,199 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T02:30:08,199 DEBUG [RS:1;c4730a2bacf8:33905 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T02:30:08,199 INFO [RS:1;c4730a2bacf8:33905 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T02:30:08,199 INFO [RS:1;c4730a2bacf8:33905 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T02:30:08,201 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51044, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T02:30:08,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T02:30:08,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
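The two TableDescriptorChecker warnings above show the test table being created with a 786432-byte maximum region size and an 8192-byte memstore flush size, small on purpose so splits and flushes happen quickly during the test. Whether the test sets these on the table descriptor or through hbase.hregion.max.filesize and hbase.hregion.memstore.flush.size in its configuration is not visible here; the sketch below takes the descriptor route, with an invented class name and only the values from the warnings taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallLimitsSketch {
      public static TableDescriptor descriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            // Matches the MAX_FILESIZE and MEMSTORE_FLUSHSIZE values in the warnings above.
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
      }
    }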
2024-11-18T02:30:08,202 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:30:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T02:30:08,206 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T02:30:08,206 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:08,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-18T02:30:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:30:08,207 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T02:30:08,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741835_1011 (size=393) 2024-11-18T02:30:08,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741835_1011 (size=393) 2024-11-18T02:30:08,220 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 50432e567ed2d92a56c608107a1430aa, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29 2024-11-18T02:30:08,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44805 is added to blk_1073741836_1012 (size=76) 2024-11-18T02:30:08,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40223 is added to blk_1073741836_1012 (size=76) 2024-11-18T02:30:08,234 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:08,234 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 50432e567ed2d92a56c608107a1430aa, disabling compactions & flushes 2024-11-18T02:30:08,234 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:08,234 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:08,234 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. after waiting 0 ms 2024-11-18T02:30:08,234 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:08,234 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:08,234 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 50432e567ed2d92a56c608107a1430aa: Waiting for close lock at 1731897008234Disabling compacts and flushes for region at 1731897008234Disabling writes for close at 1731897008234Writing region close event to WAL at 1731897008234Closed at 1731897008234 2024-11-18T02:30:08,237 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T02:30:08,237 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731897008237"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731897008237"}]},"ts":"1731897008237"} 2024-11-18T02:30:08,241 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
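Once the meta Put above lands, the new table has exactly one region with empty start and end keys. A hedged client-side sketch (connection details assumed, class name invented) of how that single region becomes visible through the Admin API:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Expect the one region recorded in hbase:meta above.
          List<RegionInfo> regions =
              admin.getRegions(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
          regions.forEach(r -> System.out.println(r.getRegionNameAsString()));
        }
      }
    }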
2024-11-18T02:30:08,242 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T02:30:08,243 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731897008242"}]},"ts":"1731897008242"} 2024-11-18T02:30:08,245 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-18T02:30:08,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=50432e567ed2d92a56c608107a1430aa, ASSIGN}] 2024-11-18T02:30:08,248 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=50432e567ed2d92a56c608107a1430aa, ASSIGN 2024-11-18T02:30:08,249 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=50432e567ed2d92a56c608107a1430aa, ASSIGN; state=OFFLINE, location=c4730a2bacf8,42723,1731897006974; forceNewPlan=false, retain=false 2024-11-18T02:30:08,302 INFO [RS:1;c4730a2bacf8:33905 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C33905%2C1731897008067, suffix=, logDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067, archiveDir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs, maxLogs=32 2024-11-18T02:30:08,303 INFO [RS:1;c4730a2bacf8:33905 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33905%2C1731897008067.1731897008303 2024-11-18T02:30:08,315 INFO [RS:1;c4730a2bacf8:33905 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 2024-11-18T02:30:08,325 DEBUG [RS:1;c4730a2bacf8:33905 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38707:38707),(127.0.0.1/127.0.0.1:42453:42453)] 2024-11-18T02:30:08,400 INFO [c4730a2bacf8:36945 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
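The WAL configuration line above reports blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the new region server's FSHLog. These figures normally derive from the keys sketched below, with the roll size being block size times the roll multiplier; assuming the test sets them explicitly rather than inheriting defaults is a guess, and the class name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // 256 MB * 0.5 = 128 MB roll size
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }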
2024-11-18T02:30:08,400 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=50432e567ed2d92a56c608107a1430aa, regionState=OPENING, regionLocation=c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:08,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=50432e567ed2d92a56c608107a1430aa, ASSIGN because future has completed 2024-11-18T02:30:08,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 50432e567ed2d92a56c608107a1430aa, server=c4730a2bacf8,42723,1731897006974}] 2024-11-18T02:30:08,535 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T02:30:08,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:08,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:08,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:08,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:08,564 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 
2024-11-18T02:30:08,564 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 50432e567ed2d92a56c608107a1430aa, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:30:08,565 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,565 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:08,565 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,565 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,567 INFO [StoreOpener-50432e567ed2d92a56c608107a1430aa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,569 INFO [StoreOpener-50432e567ed2d92a56c608107a1430aa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50432e567ed2d92a56c608107a1430aa columnFamilyName info 2024-11-18T02:30:08,569 DEBUG [StoreOpener-50432e567ed2d92a56c608107a1430aa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:08,570 INFO [StoreOpener-50432e567ed2d92a56c608107a1430aa-1 {}] regionserver.HStore(327): Store=50432e567ed2d92a56c608107a1430aa/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:08,570 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,571 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,571 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,572 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,572 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,574 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,576 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:30:08,577 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 50432e567ed2d92a56c608107a1430aa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827005, jitterRate=0.05159135162830353}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T02:30:08,577 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:08,578 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 50432e567ed2d92a56c608107a1430aa: Running coprocessor pre-open hook at 1731897008566Writing region info on filesystem at 1731897008566Initializing all the Stores at 1731897008567 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897008567Cleaning up temporary data from old regions at 1731897008572 (+5 ms)Running coprocessor post-open hooks at 1731897008577 (+5 ms)Region opened successfully at 1731897008578 (+1 ms) 2024-11-18T02:30:08,579 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa., pid=6, masterSystemTime=1731897008559 2024-11-18T02:30:08,582 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:08,582 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:08,583 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=50432e567ed2d92a56c608107a1430aa, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:08,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 50432e567ed2d92a56c608107a1430aa, server=c4730a2bacf8,42723,1731897006974 because future has completed 2024-11-18T02:30:08,590 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T02:30:08,590 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 50432e567ed2d92a56c608107a1430aa, server=c4730a2bacf8,42723,1731897006974 in 183 msec 2024-11-18T02:30:08,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T02:30:08,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=50432e567ed2d92a56c608107a1430aa, ASSIGN in 344 msec 2024-11-18T02:30:08,594 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T02:30:08,594 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731897008594"}]},"ts":"1731897008594"} 2024-11-18T02:30:08,597 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-18T02:30:08,598 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T02:30:08,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 396 msec 2024-11-18T02:30:13,447 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T02:30:13,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:13,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:13,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:13,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:13,488 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-18T02:30:17,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:30:17,769 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T02:30:17,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T02:30:17,770 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-18T02:30:17,771 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:30:17,771 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T02:30:18,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:30:18,275 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-18T02:30:18,275 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-18T02:30:18,278 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T02:30:18,278 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:18,291 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:18,295 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:18,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:18,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:18,296 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:30:18,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@520ff668{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:18,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@691515f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:18,412 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c141b19{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/java.io.tmpdir/jetty-localhost-40701-hadoop-hdfs-3_4_1-tests_jar-_-any-14401642021637654994/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:18,413 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cac6b83{HTTP/1.1, (http/1.1)}{localhost:40701} 2024-11-18T02:30:18,413 INFO [Time-limited test {}] server.Server(415): Started @116869ms 2024-11-18T02:30:18,414 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:30:18,459 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:18,462 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:18,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:18,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:18,463 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:30:18,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bd1d04c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:18,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b9c8816{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:18,538 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data5/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:18,538 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data6/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:18,568 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:30:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb38864386256465 with lease ID 0x62472321fbf7b329: Processing first storage report for DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc from datanode DatanodeRegistration(127.0.0.1:37597, datanodeUuid=9cdfa387-fa91-4292-8a01-b38283bff5f3, infoPort=40891, infoSecurePort=0, ipcPort=40405, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb38864386256465 with lease ID 0x62472321fbf7b329: from storage DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc node DatanodeRegistration(127.0.0.1:37597, datanodeUuid=9cdfa387-fa91-4292-8a01-b38283bff5f3, infoPort=40891, infoSecurePort=0, ipcPort=40405, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcb38864386256465 with lease ID 0x62472321fbf7b329: Processing first storage report for DS-a184ba64-bbbb-47e8-a58c-04fff0054219 from datanode DatanodeRegistration(127.0.0.1:37597, datanodeUuid=9cdfa387-fa91-4292-8a01-b38283bff5f3, infoPort=40891, infoSecurePort=0, ipcPort=40405, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb38864386256465 with lease ID 0x62472321fbf7b329: from storage DS-a184ba64-bbbb-47e8-a58c-04fff0054219 node DatanodeRegistration(127.0.0.1:37597, datanodeUuid=9cdfa387-fa91-4292-8a01-b38283bff5f3, infoPort=40891, infoSecurePort=0, ipcPort=40405, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:18,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cbf28ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/java.io.tmpdir/jetty-localhost-46857-hadoop-hdfs-3_4_1-tests_jar-_-any-4170854108542236107/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:18,603 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3eb2cdd3{HTTP/1.1, (http/1.1)}{localhost:46857} 2024-11-18T02:30:18,603 INFO [Time-limited test {}] server.Server(415): Started @117060ms 2024-11-18T02:30:18,605 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:30:18,645 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:18,649 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:18,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:18,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:18,657 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:30:18,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eebeea4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:18,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e96eece{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:18,723 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data7/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:18,724 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data8/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:18,761 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:30:18,767 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb87ea4f4a2f65525 with lease ID 0x62472321fbf7b32a: Processing first storage report for DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a from datanode DatanodeRegistration(127.0.0.1:43115, datanodeUuid=d4aaa37d-51f2-4406-8a88-055580cebecb, infoPort=37507, infoSecurePort=0, ipcPort=35011, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:18,767 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb87ea4f4a2f65525 with lease ID 0x62472321fbf7b32a: from storage DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a node DatanodeRegistration(127.0.0.1:43115, datanodeUuid=d4aaa37d-51f2-4406-8a88-055580cebecb, infoPort=37507, infoSecurePort=0, ipcPort=35011, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:18,767 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb87ea4f4a2f65525 with lease ID 0x62472321fbf7b32a: Processing first storage report for DS-379a2a3c-dc50-4008-b6f8-eb1101b26ce0 from datanode DatanodeRegistration(127.0.0.1:43115, datanodeUuid=d4aaa37d-51f2-4406-8a88-055580cebecb, infoPort=37507, infoSecurePort=0, ipcPort=35011, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:18,767 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb87ea4f4a2f65525 with lease ID 0x62472321fbf7b32a: from storage DS-379a2a3c-dc50-4008-b6f8-eb1101b26ce0 node DatanodeRegistration(127.0.0.1:43115, datanodeUuid=d4aaa37d-51f2-4406-8a88-055580cebecb, infoPort=37507, infoSecurePort=0, ipcPort=35011, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:18,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@92842f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/java.io.tmpdir/jetty-localhost-42025-hadoop-hdfs-3_4_1-tests_jar-_-any-12730122778189342725/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:18,800 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a677b{HTTP/1.1, (http/1.1)}{localhost:42025} 2024-11-18T02:30:18,800 INFO [Time-limited test {}] server.Server(415): Started @117256ms 2024-11-18T02:30:18,802 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T02:30:18,910 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:18,910 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10/current/BP-1456477421-172.17.0.2-1731897006175/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:18,937 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:30:18,940 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92bc65de9cc2f34e with lease ID 0x62472321fbf7b32b: Processing first storage report for DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd from datanode DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:18,940 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92bc65de9cc2f34e with lease ID 0x62472321fbf7b32b: from storage DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd node DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T02:30:18,940 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x92bc65de9cc2f34e with lease ID 0x62472321fbf7b32b: Processing first storage report for DS-122145b9-5033-4959-b523-8404e6b1cc94 from datanode DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175) 2024-11-18T02:30:18,940 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x92bc65de9cc2f34e with lease ID 0x62472321fbf7b32b: from storage DS-122145b9-5033-4959-b523-8404e6b1cc94 node DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:19,033 WARN [ResponseProcessor for block BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,033 WARN [ResponseProcessor for block BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,034 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 block BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:19,034 WARN [ResponseProcessor for block BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,035 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 block BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:19,035 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 block BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 
2024-11-18T02:30:19,036 WARN [ResponseProcessor for block BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,036 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta block BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK], DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:19,037 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-447774183_22 at /127.0.0.1:32852 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32852 dst: /127.0.0.1:40223 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,038 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-447774183_22 at /127.0.0.1:58240 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44805:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58240 dst: /127.0.0.1:44805 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@461a42e8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:19,039 WARN [PacketResponder: BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40223] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,040 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1768377466_22 at /127.0.0.1:58322 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44805:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58322 dst: /127.0.0.1:44805 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,040 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@217a95d6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:19,041 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:19,040 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1768377466_22 at /127.0.0.1:32920 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32920 dst: /127.0.0.1:40223 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,041 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63c2c4fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:19,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:58274 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44805:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58274 dst: /127.0.0.1:44805 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:30:19,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:32888 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32888 dst: /127.0.0.1:40223 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,041 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35026af9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:19,042 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:58284 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44805:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58284 dst: /127.0.0.1:44805 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,042 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:32896 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32896 dst: /127.0.0.1:40223 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:30:19,049 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:19,049 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T02:30:19,050 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:19,050 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1456477421-172.17.0.2-1731897006175 (Datanode Uuid 35b9dbaa-4267-42f2-98b7-389d3e5f4a4e) service to localhost/127.0.0.1:43001 2024-11-18T02:30:19,050 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data3/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:19,051 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data4/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:19,053 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:19,053 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 block BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,053 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 block BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,054 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-447774183_22 at /127.0.0.1:39010 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44805:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39010 dst: /127.0.0.1:44805 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,054 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta block BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,053 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@57dfa14a {}] datanode.DataXceiver(331): 127.0.0.1:44805:DataXceiver error processing unknown operation src: /127.0.0.1:39012 dst: /127.0.0.1:44805 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:19,056 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 block BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@805be33{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:19,071 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@372d60ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:19,071 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:19,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e195dbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:19,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72bec19e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:19,072 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:19,072 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T02:30:19,073 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1456477421-172.17.0.2-1731897006175 (Datanode Uuid 9c65b774-e3f5-465f-ad31-04e2403fc80d) service to localhost/127.0.0.1:43001 2024-11-18T02:30:19,073 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:19,073 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data1/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:19,074 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data2/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:19,074 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:19,078 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa., hostname=c4730a2bacf8,42723,1731897006974, seqNum=2] 2024-11-18T02:30:19,079 ERROR 
[FSHLog-0-hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29-prefix:c4730a2bacf8,42723,1731897006974 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,080 WARN [FSHLog-0-hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29-prefix:c4730a2bacf8,42723,1731897006974 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,080 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:19,080 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C42723%2C1731897006974:(num 1731897007537) roll requested 2024-11-18T02:30:19,080 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C42723%2C1731897006974.1731897019080 2024-11-18T02:30:19,102 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:19,102 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:19,102 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:19,102 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:19,102 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:19,102 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897019080 2024-11-18T02:30:19,103 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:19,103 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:19,104 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-18T02:30:19,104 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-18T02:30:19,104 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 2024-11-18T02:30:19,107 WARN [IPC Server handler 1 on default port 43001 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-18T02:30:19,111 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40891:40891),(127.0.0.1/127.0.0.1:37507:37507)] 2024-11-18T02:30:19,111 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 is not closed yet, will try archiving it next time 2024-11-18T02:30:19,111 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 after 5ms 2024-11-18T02:30:19,114 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:20,156 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:21,112 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:21,114 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897019080 2024-11-18T02:30:21,114 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:21,114 WARN [ResponseProcessor for block BP-1456477421-172.17.0.2-1731897006175:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1456477421-172.17.0.2-1731897006175:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:21,115 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897019080 block BP-1456477421-172.17.0.2-1731897006175:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 
2024-11-18T02:30:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:55718 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37597:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55718 dst: /127.0.0.1:37597 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:21,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:38572 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:43115:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38572 dst: /127.0.0.1:43115 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:21,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c141b19{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:21,119 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cac6b83{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:21,119 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:21,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@691515f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:21,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@520ff668{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:21,120 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:21,121 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:30:21,121 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1456477421-172.17.0.2-1731897006175 (Datanode Uuid 9cdfa387-fa91-4292-8a01-b38283bff5f3) service to localhost/127.0.0.1:43001 2024-11-18T02:30:21,121 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:21,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data5/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:21,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data6/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:21,121 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:22,156 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:23,112 WARN [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]] 2024-11-18T02:30:23,113 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 after 4009ms 2024-11-18T02:30:23,113 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:23,113 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C42723%2C1731897006974:(num 1731897019080) roll requested 2024-11-18T02:30:23,113 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C42723%2C1731897006974.1731897023113 2024-11-18T02:30:23,114 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:23,118 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44805 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:23,118 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 
2024-11-18T02:30:23,118 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:45984 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741839_1021 to mirror 127.0.0.1:44805 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:23,118 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741839_1021 2024-11-18T02:30:23,118 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:45984 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T02:30:23,118 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:45984 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45984 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:23,120 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:23,123 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:23,123 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 2024-11-18T02:30:23,123 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741840_1022 2024-11-18T02:30:23,124 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK] 2024-11-18T02:30:23,125 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:23,125 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:23,125 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741841_1023 2024-11-18T02:30:23,126 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] 2024-11-18T02:30:23,126 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T02:30:23,130 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:23,130 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:23,130 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:23,131 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:23,131 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:23,131 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897019080 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897023113 2024-11-18T02:30:23,132 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43685:43685),(127.0.0.1/127.0.0.1:37507:37507)] 2024-11-18T02:30:23,132 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 is not closed yet, will try archiving it next time 2024-11-18T02:30:23,132 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897019080 is not closed yet, will try archiving it next time 2024-11-18T02:30:23,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43115 is added to blk_1073741838_1020 (size=3600) 2024-11-18T02:30:23,534 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 is not closed yet, will try archiving it next time 2024-11-18T02:30:24,157 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:24,777 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@57e35ef0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43115, datanodeUuid=d4aaa37d-51f2-4406-8a88-055580cebecb, infoPort=37507, infoSecurePort=0, ipcPort=35011, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741838_1020 to 127.0.0.1:40223 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,115 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,130 WARN [ResponseProcessor for block BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:25,130 WARN [DataStreamer for file /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897023113 block BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:25,131 WARN [PacketResponder: BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43115] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,131 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:45994 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45994 dst: /127.0.0.1:37435 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,132 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:34074 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:43115:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34074 dst: /127.0.0.1:43115 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,132 WARN [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]] 2024-11-18T02:30:25,132 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,132 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C42723%2C1731897006974:(num 1731897023113) roll requested 2024-11-18T02:30:25,133 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C42723%2C1731897006974.1731897025132 2024-11-18T02:30:25,133 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cbf28ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:25,134 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3eb2cdd3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:25,134 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:25,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b9c8816{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:25,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bd1d04c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:25,137 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:25,137 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:30:25,137 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1456477421-172.17.0.2-1731897006175 (Datanode Uuid d4aaa37d-51f2-4406-8a88-055580cebecb) service to localhost/127.0.0.1:43001 2024-11-18T02:30:25,137 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:25,137 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data7/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:25,138 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:25,138 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46014 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741843_1026 to mirror 127.0.0.1:37597 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,138 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37597 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,138 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 2024-11-18T02:30:25,138 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741843_1026 2024-11-18T02:30:25,139 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46014 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T02:30:25,140 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK] 2024-11-18T02:30:25,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46014 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46014 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,142 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,142 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:25,142 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741844_1027 2024-11-18T02:30:25,143 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] 2024-11-18T02:30:25,144 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data8/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:25,144 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,145 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 2024-11-18T02:30:25,145 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741845_1028 2024-11-18T02:30:25,146 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:25,149 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43115 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,149 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:25,149 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46028 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741846_1029 to mirror 127.0.0.1:43115 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,149 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741846_1029 2024-11-18T02:30:25,150 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46028 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-18T02:30:25,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46028 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46028 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,150 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:25,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42723 {}] regionserver.HRegion(8855): Flush requested on 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:25,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 50432e567ed2d92a56c608107a1430aa 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T02:30:25,151 WARN [IPC Server handler 3 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T02:30:25,151 WARN [IPC Server handler 3 on default port 43001 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T02:30:25,151 WARN [IPC Server handler 3 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T02:30:25,158 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:25,158 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:25,158 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-18T02:30:25,159 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:25,159 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:25,159 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897023113 with entries=7, filesize=7.25 KB; new WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897025132 2024-11-18T02:30:25,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741842_1025 (size=7430) 2024-11-18T02:30:25,172 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43685:43685)] 2024-11-18T02:30:25,172 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 is not closed yet, will try archiving it next time 2024-11-18T02:30:25,172 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897023113 is not closed yet, will try archiving it next time 2024-11-18T02:30:25,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/cad0e402632e476991567cfb9e15ddb6 is 1080, key is row0002/info:/1731897021123/Put/seqid=0 2024-11-18T02:30:25,179 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,180 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 
2024-11-18T02:30:25,180 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741848_1031 2024-11-18T02:30:25,180 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:25,181 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,182 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK], DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 2024-11-18T02:30:25,182 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741849_1032 2024-11-18T02:30:25,182 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:25,185 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40223 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:25,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46040 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741850_1033 to mirror 127.0.0.1:40223 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,185 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:25,185 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741850_1033 2024-11-18T02:30:25,185 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46040 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:25,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46040 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46040 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,186 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] 2024-11-18T02:30:25,188 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37597 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,188 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 2024-11-18T02:30:25,188 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741851_1034 2024-11-18T02:30:25,188 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46050 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741851_1034 to mirror 127.0.0.1:37597 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:25,188 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46050 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:25,189 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK] 2024-11-18T02:30:25,188 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46050 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46050 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:30:25,189 WARN [IPC Server handler 2 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T02:30:25,189 WARN [IPC Server handler 2 on default port 43001 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T02:30:25,189 WARN [IPC Server handler 2 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T02:30:25,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741852_1035 (size=10347) 2024-11-18T02:30:25,562 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 is not closed yet, will try archiving it next time 2024-11-18T02:30:25,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/cad0e402632e476991567cfb9e15ddb6 2024-11-18T02:30:25,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/cad0e402632e476991567cfb9e15ddb6 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/cad0e402632e476991567cfb9e15ddb6 2024-11-18T02:30:25,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/cad0e402632e476991567cfb9e15ddb6, entries=5, sequenceid=11, filesize=10.1 K 2024-11-18T02:30:25,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 50432e567ed2d92a56c608107a1430aa in 464ms, sequenceid=11, compaction requested=false 2024-11-18T02:30:25,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
50432e567ed2d92a56c608107a1430aa: 2024-11-18T02:30:25,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42723 {}] regionserver.HRegion(8855): Flush requested on 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:25,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 50432e567ed2d92a56c608107a1430aa 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-18T02:30:25,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/076c2331091945ae859cbb3a01e27576 is 1080, key is row0007/info:/1731897025152/Put/seqid=0 2024-11-18T02:30:25,797 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,797 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:25,797 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741853_1036 2024-11-18T02:30:25,797 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] 2024-11-18T02:30:25,798 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,799 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 2024-11-18T02:30:25,799 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741854_1037 2024-11-18T02:30:25,799 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK] 2024-11-18T02:30:25,800 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,800 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:25,800 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741855_1038 2024-11-18T02:30:25,801 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:25,802 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:25,802 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 2024-11-18T02:30:25,802 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741856_1039 2024-11-18T02:30:25,802 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:25,803 WARN [IPC Server handler 4 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T02:30:25,803 WARN [IPC Server handler 4 on default port 43001 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T02:30:25,803 WARN [IPC Server handler 4 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T02:30:25,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741857_1040 (size=12506) 2024-11-18T02:30:26,157 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:26,207 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/076c2331091945ae859cbb3a01e27576 2024-11-18T02:30:26,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/076c2331091945ae859cbb3a01e27576 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/076c2331091945ae859cbb3a01e27576 2024-11-18T02:30:26,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/076c2331091945ae859cbb3a01e27576, entries=7, sequenceid=24, filesize=12.2 K 2024-11-18T02:30:26,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 50432e567ed2d92a56c608107a1430aa in 431ms, sequenceid=24, compaction requested=false 2024-11-18T02:30:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 50432e567ed2d92a56c608107a1430aa: 2024-11-18T02:30:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-18T02:30:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:26,221 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/076c2331091945ae859cbb3a01e27576 because midkey is the same as first or last row 2024-11-18T02:30:27,115 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,172 WARN [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]] 2024-11-18T02:30:27,172 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,172 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C42723%2C1731897006974:(num 1731897025132) roll requested 2024-11-18T02:30:27,173 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C42723%2C1731897006974.1731897027172 2024-11-18T02:30:27,175 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,176 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:27,176 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741858_1041 2024-11-18T02:30:27,176 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] 2024-11-18T02:30:27,178 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43115 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,178 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46100 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741859_1042 to mirror 127.0.0.1:43115 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,178 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:27,179 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741859_1042 2024-11-18T02:30:27,179 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46100 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-18T02:30:27,179 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46100 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46100 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,179 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:27,181 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37597 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46110 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741860_1043 to mirror 127.0.0.1:37597 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,181 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 2024-11-18T02:30:27,181 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741860_1043 2024-11-18T02:30:27,181 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46110 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T02:30:27,181 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46110 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46110 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,182 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK] 2024-11-18T02:30:27,183 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,183 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 2024-11-18T02:30:27,183 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741861_1044 2024-11-18T02:30:27,184 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:27,184 WARN [IPC Server handler 1 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T02:30:27,184 WARN [IPC Server handler 1 on default port 43001 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T02:30:27,184 WARN [IPC Server handler 1 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T02:30:27,187 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:27,187 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:27,187 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:27,187 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:27,187 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:27,187 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897025132 with entries=17, filesize=17.07 KB; new WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897027172 2024-11-18T02:30:27,188 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43685:43685)] 2024-11-18T02:30:27,188 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 is not closed yet, will try archiving it next time 2024-11-18T02:30:27,188 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897025132 is not closed yet, will try archiving it next time 2024-11-18T02:30:27,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741847_1030 (size=17486) 2024-11-18T02:30:27,192 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897019080 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs/c4730a2bacf8%2C42723%2C1731897006974.1731897019080 2024-11-18T02:30:27,194 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897023113 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs/c4730a2bacf8%2C42723%2C1731897006974.1731897023113 2024-11-18T02:30:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42723 {}] regionserver.HRegion(8855): Flush requested on 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:27,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 50432e567ed2d92a56c608107a1430aa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T02:30:27,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/5c2b97e8cf994cb098a2e91515069bad is 1079, key is tmprow/info:/1731897027209/Put/seqid=0 2024-11-18T02:30:27,215 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,216 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:27,216 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741863_1046 2024-11-18T02:30:27,216 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] 2024-11-18T02:30:27,218 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43115 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,218 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46120 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741864_1047 to mirror 127.0.0.1:43115 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,218 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:27,218 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741864_1047 2024-11-18T02:30:27,218 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46120 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:27,218 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46120 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46120 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,219 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:27,221 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44805 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46126 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741865_1048 to mirror 127.0.0.1:44805 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,221 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 2024-11-18T02:30:27,221 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741865_1048 2024-11-18T02:30:27,221 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46126 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:27,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46126 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46126 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,222 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:27,223 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46130 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741866_1049 to mirror 127.0.0.1:37597 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,223 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37597 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,224 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46130 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:27,224 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 2024-11-18T02:30:27,224 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741866_1049 2024-11-18T02:30:27,224 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46130 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46130 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:30:27,224 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK] 2024-11-18T02:30:27,225 WARN [IPC Server handler 0 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T02:30:27,225 WARN [IPC Server handler 0 on default port 43001 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T02:30:27,225 WARN [IPC Server handler 0 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T02:30:27,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741867_1050 (size=6027) 2024-11-18T02:30:27,590 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 is not closed yet, will try archiving it next time 2024-11-18T02:30:27,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/5c2b97e8cf994cb098a2e91515069bad 2024-11-18T02:30:27,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/5c2b97e8cf994cb098a2e91515069bad as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/5c2b97e8cf994cb098a2e91515069bad 2024-11-18T02:30:27,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/5c2b97e8cf994cb098a2e91515069bad, entries=1, sequenceid=34, filesize=5.9 K 2024-11-18T02:30:27,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 50432e567ed2d92a56c608107a1430aa in 433ms, 
sequenceid=34, compaction requested=true 2024-11-18T02:30:27,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 50432e567ed2d92a56c608107a1430aa: 2024-11-18T02:30:27,643 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-18T02:30:27,643 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:27,643 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/076c2331091945ae859cbb3a01e27576 because midkey is the same as first or last row 2024-11-18T02:30:27,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 50432e567ed2d92a56c608107a1430aa:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T02:30:27,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:30:27,644 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:30:27,645 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:30:27,645 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HStore(1541): 50432e567ed2d92a56c608107a1430aa/info is initiating minor compaction (all files) 2024-11-18T02:30:27,645 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 50432e567ed2d92a56c608107a1430aa/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 
2024-11-18T02:30:27,645 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/cad0e402632e476991567cfb9e15ddb6, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/076c2331091945ae859cbb3a01e27576, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/5c2b97e8cf994cb098a2e91515069bad] into tmpdir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp, totalSize=28.2 K 2024-11-18T02:30:27,646 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.Compactor(225): Compacting cad0e402632e476991567cfb9e15ddb6, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731897021123 2024-11-18T02:30:27,646 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.Compactor(225): Compacting 076c2331091945ae859cbb3a01e27576, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731897025152 2024-11-18T02:30:27,647 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c2b97e8cf994cb098a2e91515069bad, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731897027209 2024-11-18T02:30:27,659 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 50432e567ed2d92a56c608107a1430aa#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:30:27,659 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/085911291a19419f8711e1835e2af862 is 1080, key is row0002/info:/1731897021123/Put/seqid=0 2024-11-18T02:30:27,662 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40223 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:27,661 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46156 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741868_1051 to mirror 127.0.0.1:40223 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,662 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:27,662 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741868_1051 2024-11-18T02:30:27,662 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46156 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:27,662 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46156 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46156 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,662 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] 2024-11-18T02:30:27,664 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37597 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,664 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46162 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741869_1052 to mirror 127.0.0.1:37597 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:30:27,664 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 2024-11-18T02:30:27,664 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741869_1052 2024-11-18T02:30:27,664 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46162 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:27,664 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46162 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46162 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:27,665 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK] 2024-11-18T02:30:27,666 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:30:27,666 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:27,666 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741870_1053 2024-11-18T02:30:27,666 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:27,668 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44805 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:27,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46168 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741871_1054 to mirror 127.0.0.1:44805 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:30:27,668 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 2024-11-18T02:30:27,668 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741871_1054 2024-11-18T02:30:27,668 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46168 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:27,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46168 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46168 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:30:27,669 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:27,669 WARN [IPC Server handler 3 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T02:30:27,669 WARN [IPC Server handler 3 on default port 43001 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T02:30:27,670 WARN [IPC Server handler 3 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T02:30:27,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741872_1055 (size=17994) 2024-11-18T02:30:27,941 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@63aed152[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741842_1025 to 127.0.0.1:44805 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:30:27,941 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@77531de7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741852_1035 to 127.0.0.1:37597 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:28,080 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/085911291a19419f8711e1835e2af862 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862 2024-11-18T02:30:28,087 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 50432e567ed2d92a56c608107a1430aa/info of 50432e567ed2d92a56c608107a1430aa into 085911291a19419f8711e1835e2af862(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T02:30:28,087 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 50432e567ed2d92a56c608107a1430aa: 2024-11-18T02:30:28,087 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa., storeName=50432e567ed2d92a56c608107a1430aa/info, priority=13, startTime=1731897027643; duration=0sec 2024-11-18T02:30:28,087 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T02:30:28,087 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:28,087 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862 because midkey is the same as first or last row 2024-11-18T02:30:28,087 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T02:30:28,087 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:28,087 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862 because midkey is the same as first or last row 2024-11-18T02:30:28,088 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T02:30:28,088 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:28,088 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862 because midkey is the same as first or last row 2024-11-18T02:30:28,088 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:30:28,088 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 50432e567ed2d92a56c608107a1430aa:info 2024-11-18T02:30:28,158 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:28,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42723 {}] regionserver.HRegion(8855): Flush requested on 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:28,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 50432e567ed2d92a56c608107a1430aa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T02:30:28,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/1ab80c68206c4b57a18e5f03d280e9d7 is 1079, key is tmprow/info:/1731897028627/Put/seqid=0 2024-11-18T02:30:28,635 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:28,635 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:28,635 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741873_1056 2024-11-18T02:30:28,635 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:28,637 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:28,637 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]) is bad. 2024-11-18T02:30:28,637 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741874_1057 2024-11-18T02:30:28,637 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40223,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK] 2024-11-18T02:30:28,638 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:28,639 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK]) is bad. 2024-11-18T02:30:28,639 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741875_1058 2024-11-18T02:30:28,639 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37597,DS-ed20a1a3-accb-46ca-8561-4ac78e762bcc,DISK] 2024-11-18T02:30:28,641 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44805 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:28,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46194 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10]'}, localName='127.0.0.1:37435', datanodeUuid='9a7121b3-2bd2-43d9-97a2-7e69ac76ec76', xmitsInProgress=0}:Exception transferring block BP-1456477421-172.17.0.2-1731897006175:blk_1073741876_1059 to mirror 127.0.0.1:44805 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:28,641 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK], DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 2024-11-18T02:30:28,641 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741876_1059 2024-11-18T02:30:28,641 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46194 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T02:30:28,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_538001803_22 at /127.0.0.1:46194 [Receiving block BP-1456477421-172.17.0.2-1731897006175:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:37435:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46194 dst: /127.0.0.1:37435 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:28,642 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:28,642 WARN [IPC Server handler 2 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T02:30:28,642 WARN [IPC Server handler 2 on default port 43001 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T02:30:28,642 WARN [IPC Server handler 2 on default port 43001 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T02:30:28,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741877_1060 (size=6027) 2024-11-18T02:30:28,940 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@77531de7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741857_1040 to 127.0.0.1:40223 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:28,940 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@63aed152[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741847_1030 to 127.0.0.1:43115 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
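The ConnectException traces above all follow the same write-pipeline recovery path: createBlockOutputStream fails against the first node of the pipeline, the client marks that datanode as bad, abandons the allocated block, adds the node to its exclusion list, and asks the NameNode for a replacement block on the remaining nodes. Once 127.0.0.1:43115, 40223 and 37597 have all been excluded and the mirror 127.0.0.1:44805 also refuses connections, the NameNode can no longer place enough replicas, which is what the BlockPlacementPolicyDefault warnings above are reporting. A minimal, self-contained sketch of that exclude-and-retry loop (illustrative only; PipelineRetrySketch and its helper are made-up names, not the real DataStreamer code):

    import java.net.ConnectException;
    import java.util.ArrayList;
    import java.util.List;

    /** Illustrative exclude-and-retry loop, not the actual HDFS DataStreamer. */
    public class PipelineRetrySketch {

        /** Stands in for opening a block output stream to the first node of the pipeline. */
        static void createBlockOutputStream(List<String> pipeline) throws ConnectException {
            // In the log, 43115, 40223, 37597 and 44805 all refuse connections;
            // 41301 stands in for a node that accepts (it rejoins the cluster later in this log).
            if (!pipeline.get(0).equals("127.0.0.1:41301")) {
                throw new ConnectException("Connection refused: " + pipeline.get(0));
            }
        }

        public static void main(String[] args) {
            List<String> candidates = new ArrayList<>(List.of(
                    "127.0.0.1:43115", "127.0.0.1:40223", "127.0.0.1:37597", "127.0.0.1:41301"));
            List<String> excluded = new ArrayList<>();
            while (!candidates.isEmpty()) {
                List<String> pipeline = new ArrayList<>(candidates);
                try {
                    createBlockOutputStream(pipeline);
                    System.out.println("Pipeline established: " + pipeline);
                    return;
                } catch (ConnectException e) {
                    String bad = pipeline.get(0);
                    // Mirrors the "Abandoning blk_..." and "Excluding datanode ..." pairs above.
                    System.out.println("Abandoning block, excluding datanode " + bad);
                    excluded.add(bad);
                    candidates.remove(bad);
                }
            }
            System.out.println("All candidate datanodes excluded: " + excluded);
        }
    }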
2024-11-18T02:30:29,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/1ab80c68206c4b57a18e5f03d280e9d7
2024-11-18T02:30:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/1ab80c68206c4b57a18e5f03d280e9d7 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/1ab80c68206c4b57a18e5f03d280e9d7
2024-11-18T02:30:29,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/1ab80c68206c4b57a18e5f03d280e9d7, entries=1, sequenceid=45, filesize=5.9 K
2024-11-18T02:30:29,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 50432e567ed2d92a56c608107a1430aa in 431ms, sequenceid=45, compaction requested=false
2024-11-18T02:30:29,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 50432e567ed2d92a56c608107a1430aa:
2024-11-18T02:30:29,059 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K
2024-11-18T02:30:29,059 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-18T02:30:29,059 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862 because midkey is the same as first or last row
2024-11-18T02:30:29,116 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-18T02:30:29,192 WARN [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas.
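The flush above leaves the store at sumSize=23.5 K against the 16.0 K sizeToCheck, so the split policy votes to split, yet no split is attempted: the HFile's midkey equals its first or last row, apparently because the test's writes land on only a few distinct rows, so no valid split point exists. A small sketch of that two-step decision (sizes in bytes; the method names are simplified stand-ins, not the real ConstantSizeRegionSplitPolicy or StoreUtils code):

    import java.util.Optional;

    /** Simplified stand-ins for the split-size check and midkey validation seen in the log. */
    public class SplitDecisionSketch {

        /** Mirrors "Should split because region size is big enough sumSize=..., sizeToCheck=...". */
        static boolean shouldSplit(long sumStoreFileBytes, long sizeToCheckBytes) {
            return sumStoreFileBytes > sizeToCheckBytes;
        }

        /** Mirrors "cannot split ... because midkey is the same as first or last row". */
        static Optional<String> splitPoint(String firstRow, String midKey, String lastRow) {
            if (midKey.equals(firstRow) || midKey.equals(lastRow)) {
                return Optional.empty();   // no usable split point
            }
            return Optional.of(midKey);
        }

        public static void main(String[] args) {
            long sumSize = 24_064;      // ~23.5 K of store files after the flush
            long sizeToCheck = 16_384;  // the 16.0 K sizeToCheck reported in the log
            System.out.println("shouldSplit = " + shouldSplit(sumSize, sizeToCheck));
            // Nearly all cells share a handful of rows, so the midkey collapses onto a boundary row.
            System.out.println("splitPoint  = " + splitPoint("row0013", "row0013", "row0013"));
        }
    }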
2024-11-18T02:30:29,192 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:29,244 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:29,247 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:29,248 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:29,248 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:29,249 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:30:29,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c9b51ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:29,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79e92ca8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:29,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3bac5d7c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/java.io.tmpdir/jetty-localhost-42865-hadoop-hdfs-3_4_1-tests_jar-_-any-575669118312560441/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:29,366 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ecaf507{HTTP/1.1, (http/1.1)}{localhost:42865} 2024-11-18T02:30:29,366 INFO [Time-limited test {}] server.Server(415): Started @127822ms 2024-11-18T02:30:29,368 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:30:29,467 WARN [Thread-985 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:30:29,475 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x296737be39c53bd9 with lease ID 0x62472321fbf7b32c: from storage DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8 node DatanodeRegistration(127.0.0.1:41301, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=42207, infoSecurePort=0, ipcPort=41845, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:29,475 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x296737be39c53bd9 with lease ID 0x62472321fbf7b32c: from storage DS-61c17e9d-d1fb-45a8-aa47-87b975fa7f2c node DatanodeRegistration(127.0.0.1:41301, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=42207, infoSecurePort=0, ipcPort=41845, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:30,158 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:30,940 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@77531de7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741872_1055 to 127.0.0.1:43115 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
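The DirectoryScanner warning that closes the previous line is a plain sanity check: the throttle is expressed as milliseconds of scan work allowed per wall-clock second, so a value above 1000 cannot be honoured and the datanode falls back to -1, i.e. throttling disabled. A tiny sketch of that validation (the class and constant names are illustrative, not the datanode's actual code):

    /** Illustrative validation of a "ms of work allowed per second" throttle setting. */
    public class ScanThrottleSketch {
        static final int DISABLED = -1;          // matches "Assuming default value of -1" above
        static final int MAX_MS_PER_SEC = 1000;  // a wall-clock second only has 1000 ms to spend

        static int validate(int configuredMsPerSec) {
            // Mirrors "dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec."
            return configuredMsPerSec > MAX_MS_PER_SEC ? DISABLED : configuredMsPerSec;
        }

        public static void main(String[] args) {
            System.out.println(validate(1500));  // -1: rejected, throttling disabled
            System.out.println(validate(500));   // 500: accepted as-is
        }
    }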
2024-11-18T02:30:30,940 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@63aed152[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741867_1050 to 127.0.0.1:43115 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:31,116 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:31,193 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:31,939 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@63aed152[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37435, datanodeUuid=9a7121b3-2bd2-43d9-97a2-7e69ac76ec76, infoPort=43685, infoSecurePort=0, ipcPort=33571, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741877_1060 to 127.0.0.1:37597 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:32,158 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:33,116 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:33,193 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:34,159 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:35,117 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:35,193 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:36,159 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:36,874 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T02:30:37,117 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:37,194 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:37,286 ERROR [FSHLog-0-hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData-prefix:c4730a2bacf8,36945,1731897006897 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:37,286 WARN [FSHLog-0-hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData-prefix:c4730a2bacf8,36945,1731897006897 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
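At this point every append and sync on the master's WAL fails with the same "All datanodes ... are bad" IOException: the stream's pipeline has been reduced to the single node 127.0.0.1:44805, that node is down, and no replacement datanode could be brought into the pipeline. How aggressively the HDFS client tries to substitute replacement datanodes into a live write pipeline is controlled by the dfs.client.block.write.replace-datanode-on-failure.* settings; the snippet below only illustrates the knobs involved (the values shown are the usual defaults, not a recommendation for this test):

    import org.apache.hadoop.conf.Configuration;

    /** Shows the client-side settings that govern datanode replacement on pipeline failure. */
    public class ReplaceDatanodePolicySketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Allow failed datanodes in an existing write pipeline to be replaced at all.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            // DEFAULT only replaces nodes under certain conditions (e.g. sufficiently wide pipelines);
            // ALWAYS and NEVER are the other accepted values.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // With best-effort enabled the writer keeps going even if replacement fails,
            // instead of aborting the stream as seen in this log.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
            System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
        }
    }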
2024-11-18T02:30:37,286 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C36945%2C1731897006897:(num 1731897007146) roll requested 2024-11-18T02:30:37,287 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C36945%2C1731897006897.1731897037287 2024-11-18T02:30:37,290 WARN [Thread-1005 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:37,290 WARN [Thread-1005 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK], DatanodeInfoWithStorage[127.0.0.1:41301,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]) is bad. 2024-11-18T02:30:37,290 WARN [Thread-1005 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741878_1061 2024-11-18T02:30:37,290 WARN [Thread-1005 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK] 2024-11-18T02:30:37,295 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:37,295 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:37,295 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:37,295 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:37,295 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:37,295 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897037287 2024-11-18T02:30:37,296 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:37,296 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:37,296 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 2024-11-18T02:30:37,296 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43685:43685),(127.0.0.1/127.0.0.1:42207:42207)] 2024-11-18T02:30:37,296 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 is not closed yet, will try archiving it next time 2024-11-18T02:30:37,296 WARN [IPC Server handler 3 on default port 43001 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-11-18T02:30:37,296 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 after 0ms 2024-11-18T02:30:38,159 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:39,194 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:39,494 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2377c425 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:44805,null,null]) java.net.ConnectException: Call From c4730a2bacf8/172.17.0.2 to localhost:33871 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T02:30:39,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741833_1019 (size=455) 2024-11-18T02:30:40,128 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs/c4730a2bacf8%2C42723%2C1731897006974.1731897007537 2024-11-18T02:30:40,130 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897025132 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs/c4730a2bacf8%2C42723%2C1731897006974.1731897025132 2024-11-18T02:30:40,160 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:41,194 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:41,298 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/WALs/c4730a2bacf8,36945,1731897006897/c4730a2bacf8%2C36945%2C1731897006897.1731897007146 after 4002ms 2024-11-18T02:30:42,160 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:42,471 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@121ee783[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41301, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=42207, infoSecurePort=0, ipcPort=41845, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741833_1019 to 127.0.0.1:43115 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:42,471 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5e938a71[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41301, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=42207, infoSecurePort=0, ipcPort=41845, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741835_1011 to 127.0.0.1:43115 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:43,195 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:43,471 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@121ee783[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41301, datanodeUuid=35b9dbaa-4267-42f2-98b7-389d3e5f4a4e, infoPort=42207, infoSecurePort=0, ipcPort=41845, storageInfo=lv=-57;cid=testClusterID;nsid=1112230850;c=1731897006175):Failed to transfer BP-1456477421-172.17.0.2-1731897006175:blk_1073741829_1005 to 127.0.0.1:43115 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:43,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:30:44,160 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:44,805 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C42723%2C1731897006974.1731897044804 2024-11-18T02:30:44,807 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:44,807 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 
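Back at 02:30:37 the old master WAL could not be closed through its dead pipeline, so the roller switched to a new writer and then asked the NameNode to recover the lease on the orphaned file; the log records attempt=0 after 0ms and attempt=1 after 4002ms, i.e. repeated recovery calls with a pause in between until the NameNode finally closes the file. A minimal sketch of that pattern against the public DistributedFileSystem.recoverLease API (the retry count and pause below are placeholders, not the values RecoverLeaseFSUtils actually uses):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    /** Illustrative recover-lease-with-retry loop for an orphaned WAL file. */
    public class RecoverLeaseSketch {
        public static void main(String[] args) throws Exception {
            if (args.length != 1) {
                System.err.println("usage: RecoverLeaseSketch <hdfs-path-of-old-wal>");
                return;
            }
            Configuration conf = new Configuration();
            Path oldWal = new Path(args[0]);
            FileSystem fs = oldWal.getFileSystem(conf);
            if (!(fs instanceof DistributedFileSystem dfs)) {
                throw new IllegalStateException("lease recovery only applies to HDFS paths");
            }
            long pauseMs = 4000L;                 // roughly the 0ms -> ~4s gap visible in the log
            for (int attempt = 0; attempt < 5; attempt++) {
                if (dfs.recoverLease(oldWal)) {   // true once the NameNode has closed the file
                    System.out.println("lease recovered on attempt=" + attempt);
                    return;
                }
                System.out.println("Failed to recover lease, attempt=" + attempt);
                Thread.sleep(pauseMs);
            }
        }
    }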
2024-11-18T02:30:44,807 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741880_1064 2024-11-18T02:30:44,808 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:44,812 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:44,812 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:44,812 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:44,812 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:44,813 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:44,813 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897027172 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897044804 2024-11-18T02:30:44,813 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43685:43685),(127.0.0.1/127.0.0.1:42207:42207)] 2024-11-18T02:30:44,813 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897027172 is not closed yet, will try archiving it next time 2024-11-18T02:30:44,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741862_1045 (size=13591) 2024-11-18T02:30:44,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42723 {}] regionserver.HRegion(8855): Flush requested on 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:44,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 50432e567ed2d92a56c608107a1430aa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T02:30:44,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/963afc47f19341d08c3a05b817f13cd5 is 1080, key is row0013/info:/1731897044815/Put/seqid=0 2024-11-18T02:30:44,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741882_1066 (size=11421) 2024-11-18T02:30:44,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741882_1066 (size=11421) 2024-11-18T02:30:44,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/963afc47f19341d08c3a05b817f13cd5 2024-11-18T02:30:44,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/963afc47f19341d08c3a05b817f13cd5 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/963afc47f19341d08c3a05b817f13cd5 2024-11-18T02:30:44,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/963afc47f19341d08c3a05b817f13cd5, entries=6, sequenceid=55, filesize=11.2 K 2024-11-18T02:30:44,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 50432e567ed2d92a56c608107a1430aa in 26ms, sequenceid=55, compaction requested=true 2024-11-18T02:30:44,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 50432e567ed2d92a56c608107a1430aa: 2024-11-18T02:30:44,849 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-18T02:30:44,849 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:44,849 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862 because midkey is the same as first or last row 2024-11-18T02:30:44,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 50432e567ed2d92a56c608107a1430aa:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T02:30:44,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:30:44,849 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:30:44,851 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:30:44,851 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HStore(1541): 50432e567ed2d92a56c608107a1430aa/info is initiating minor compaction (all files) 2024-11-18T02:30:44,851 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 50432e567ed2d92a56c608107a1430aa/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 
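[Editor's note] The flush/compaction sequence above (a memstore flush adds a third HFile, then ExploringCompactionPolicy selects all 3 eligible files for a minor compaction, with "16 blocking") is driven by store-level thresholds. The sketch below only illustrates those knobs with their usual defaults as assumed values; it is not the configuration this test used.

// Hedged sketch: store-file thresholds behind the compaction selection above.
// Values shown are illustrative assumptions (the stock defaults).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Flushes are blocked once a store reaches this many files (the "16 blocking" above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Ratio used by ExploringCompactionPolicy when weighing candidate file sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}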
2024-11-18T02:30:44,851 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/1ab80c68206c4b57a18e5f03d280e9d7, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/963afc47f19341d08c3a05b817f13cd5] into tmpdir=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp, totalSize=34.6 K 2024-11-18T02:30:44,851 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.Compactor(225): Compacting 085911291a19419f8711e1835e2af862, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731897021123 2024-11-18T02:30:44,852 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ab80c68206c4b57a18e5f03d280e9d7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731897028627 2024-11-18T02:30:44,852 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] compactions.Compactor(225): Compacting 963afc47f19341d08c3a05b817f13cd5, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731897029033 2024-11-18T02:30:44,867 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 50432e567ed2d92a56c608107a1430aa#info#compaction#24 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:30:44,867 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/a1d52f5ac8e84a929dd1274fac2b49bf is 1080, key is row0002/info:/1731897021123/Put/seqid=0 2024-11-18T02:30:44,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741883_1067 (size=23502) 2024-11-18T02:30:44,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741883_1067 (size=23502) 2024-11-18T02:30:44,879 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/a1d52f5ac8e84a929dd1274fac2b49bf as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/a1d52f5ac8e84a929dd1274fac2b49bf 2024-11-18T02:30:44,885 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 50432e567ed2d92a56c608107a1430aa/info of 50432e567ed2d92a56c608107a1430aa into a1d52f5ac8e84a929dd1274fac2b49bf(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 50432e567ed2d92a56c608107a1430aa: 2024-11-18T02:30:44,885 INFO [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa., storeName=50432e567ed2d92a56c608107a1430aa/info, priority=13, startTime=1731897044849; duration=0sec 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/a1d52f5ac8e84a929dd1274fac2b49bf because midkey is the same as first or last row 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/a1d52f5ac8e84a929dd1274fac2b49bf because midkey is the same as first or last row 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-18T02:30:44,885 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:44,886 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/a1d52f5ac8e84a929dd1274fac2b49bf because midkey is the same as first or last row 2024-11-18T02:30:44,886 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:30:44,886 DEBUG [RS:0;c4730a2bacf8:42723-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 50432e567ed2d92a56c608107a1430aa:info 2024-11-18T02:30:45,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42723 {}] regionserver.HRegion(8855): Flush requested on 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:45,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 50432e567ed2d92a56c608107a1430aa 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T02:30:45,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/162f2aec48d64171a90a2b0ab416d785 is 1080, key is row0018/info:/1731897044824/Put/seqid=0 2024-11-18T02:30:45,045 WARN [Thread-1038 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
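[Editor's note] The repeated "Should split because region size is big enough ... / cannot split ... because midkey is the same as first or last row" checks above come from the region split policy: the store exceeds the (tiny, test-sized) threshold, but every cell belongs to a narrow row range, so there is no usable midkey. As a hedged sketch only, the policy class and size threshold are normally chosen like this; class and property names are stock HBase, the values are assumptions.

// Hedged sketch: selecting a region split policy and size threshold.
// Not the configuration of this test; values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SplitPolicySketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Pin the policy explicitly. The region above logs both ConstantSize and
    // IncreasingToUpperBound checks because the latter extends the former.
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
    // Split once a store grows past 10 GB (the test above uses a 16 KB check size).
    conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024);
    System.out.println(conf.get("hbase.regionserver.region.split.policy"));
  }
}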
2024-11-18T02:30:45,045 WARN [Thread-1038 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:45,045 WARN [Thread-1038 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741884_1068 2024-11-18T02:30:45,046 WARN [Thread-1038 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:45,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741885_1069 (size=11421) 2024-11-18T02:30:45,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741885_1069 (size=11421) 2024-11-18T02:30:45,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/162f2aec48d64171a90a2b0ab416d785 2024-11-18T02:30:45,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/.tmp/info/162f2aec48d64171a90a2b0ab416d785 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/162f2aec48d64171a90a2b0ab416d785 2024-11-18T02:30:45,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/162f2aec48d64171a90a2b0ab416d785, entries=6, sequenceid=66, filesize=11.2 K 2024-11-18T02:30:45,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 50432e567ed2d92a56c608107a1430aa in 26ms, sequenceid=66, compaction requested=false 2024-11-18T02:30:45,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 50432e567ed2d92a56c608107a1430aa: 2024-11-18T02:30:45,065 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-18T02:30:45,065 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:30:45,065 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/a1d52f5ac8e84a929dd1274fac2b49bf because midkey is the same as first or last row 2024-11-18T02:30:45,195 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was 
enabled. 2024-11-18T02:30:45,195 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,215 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.1731897027172 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs/c4730a2bacf8%2C42723%2C1731897006974.1731897027172 2024-11-18T02:30:45,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T02:30:45,238 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T02:30:45,239 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at 
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:45,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:45,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:45,239 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T02:30:45,239 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T02:30:45,239 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1650848494, stopped=false 2024-11-18T02:30:45,239 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c4730a2bacf8,36945,1731897006897 2024-11-18T02:30:45,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:45,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:45,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:45,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:45,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:45,243 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:30:45,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:45,243 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
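[Editor's note] The earlier "LowReplication-Roller was enabled" entry and the "All datanodes ... are bad. Aborting..." IOException show FSHLog rolling the WAL when its pipeline replication degrades. The sketch below is a heavily hedged illustration of the knobs that govern that roller; the property names are assumptions based on stock FSHLog and may differ between versions.

// Hedged sketch: FSHLog low-replication roll settings (names are assumptions).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalLowReplicationSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Request a WAL roll when the current log's replication drops below this value.
    conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
    // Stop requesting low-replication rolls after this many consecutive attempts.
    conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
    System.out.println(conf.getInt("hbase.regionserver.hlog.lowreplication.rolllimit", -1));
  }
}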
2024-11-18T02:30:45,243 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:45,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:45,244 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'c4730a2bacf8,42723,1731897006974' ***** 2024-11-18T02:30:45,244 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T02:30:45,244 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c4730a2bacf8,33905,1731897008067' ***** 2024-11-18T02:30:45,244 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T02:30:45,244 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:45,244 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:45,244 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:45,244 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T02:30:45,244 INFO [RS:0;c4730a2bacf8:42723 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T02:30:45,244 INFO [RS:0;c4730a2bacf8:42723 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T02:30:45,244 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T02:30:45,244 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(3091): Received CLOSE for 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:45,244 INFO [RS:1;c4730a2bacf8:33905 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T02:30:45,244 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T02:30:45,244 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T02:30:45,244 INFO [RS:1;c4730a2bacf8:33905 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T02:30:45,245 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(959): stopping server c4730a2bacf8,33905,1731897008067 2024-11-18T02:30:45,245 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(959): stopping server c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:45,245 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:30:45,245 INFO [RS:0;c4730a2bacf8:42723 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c4730a2bacf8:42723. 
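[Editor's note] The shutdown above closes the test's AsyncConnection before stopping the master and both region servers; the "Connection has been closed by ..." entries and the accompanying call stacks are that close() propagating. A minimal hedged sketch of the same client lifecycle (create once, close on shutdown); the quorum values are placeholders taken from the log, not a recommended setup.

// Minimal sketch of the AsyncConnection lifecycle being torn down above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncConnectionLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // placeholder quorum host
    conf.setInt("hbase.zookeeper.property.clientPort", 55099); // placeholder client port
    // createAsyncConnection returns a CompletableFuture; get() blocks until connected.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // Obtain an async table handle; no RPC is issued until it is actually used.
      conn.getTable(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    } // close() here is what produces the "Connection has been closed by ..." entries.
  }
}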
2024-11-18T02:30:45,245 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:30:45,245 DEBUG [RS:0;c4730a2bacf8:42723 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:45,245 DEBUG [RS:0;c4730a2bacf8:42723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:45,245 INFO [RS:1;c4730a2bacf8:33905 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c4730a2bacf8:33905. 2024-11-18T02:30:45,245 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 50432e567ed2d92a56c608107a1430aa, disabling compactions & flushes 2024-11-18T02:30:45,245 DEBUG [RS:1;c4730a2bacf8:33905 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:30:45,245 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-18T02:30:45,245 DEBUG [RS:1;c4730a2bacf8:33905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:45,245 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T02:30:45,245 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T02:30:45,245 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:45,245 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(976): stopping server c4730a2bacf8,33905,1731897008067; all regions closed. 2024-11-18T02:30:45,245 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T02:30:45,245 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:45,245 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. after waiting 0 ms 2024-11-18T02:30:45,245 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:45,245 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T02:30:45,246 DEBUG [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1325): Online Regions={50432e567ed2d92a56c608107a1430aa=TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T02:30:45,246 DEBUG [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 50432e567ed2d92a56c608107a1430aa 2024-11-18T02:30:45,246 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:30:45,246 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:30:45,246 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:30:45,246 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,246 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:30:45,246 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:30:45,246 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,246 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/cad0e402632e476991567cfb9e15ddb6, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/076c2331091945ae859cbb3a01e27576, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/5c2b97e8cf994cb098a2e91515069bad, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/1ab80c68206c4b57a18e5f03d280e9d7, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/963afc47f19341d08c3a05b817f13cd5] to archive 2024-11-18T02:30:45,246 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,246 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-18T02:30:45,246 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,246 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,246 ERROR [FSHLog-0-hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29-prefix:c4730a2bacf8,42723,1731897006974.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,247 WARN [FSHLog-0-hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29-prefix:c4730a2bacf8,42723,1731897006974.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,247 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,247 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C42723%2C1731897006974.meta:.meta(num 1731897007934) roll requested 2024-11-18T02:30:45,247 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,247 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 2024-11-18T02:30:45,247 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C42723%2C1731897006974.meta.1731897045247.meta 2024-11-18T02:30:45,247 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T02:30:45,247 WARN [IPC Server handler 3 on default port 43001 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 has not been closed. Lease recovery is in progress. 
RecoveryId = 1070 for block blk_1073741837_1013 2024-11-18T02:30:45,248 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 after 1ms 2024-11-18T02:30:45,250 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/cad0e402632e476991567cfb9e15ddb6 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/cad0e402632e476991567cfb9e15ddb6 2024-11-18T02:30:45,251 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/076c2331091945ae859cbb3a01e27576 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/076c2331091945ae859cbb3a01e27576 2024-11-18T02:30:45,252 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/085911291a19419f8711e1835e2af862 2024-11-18T02:30:45,253 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,253 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,253 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,253 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,253 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,253 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897045247.meta 2024-11-18T02:30:45,254 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,254 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44805,DS-06189253-4a72-450b-8da7-8e486ea0adb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,254 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta 2024-11-18T02:30:45,254 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/5c2b97e8cf994cb098a2e91515069bad to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/5c2b97e8cf994cb098a2e91515069bad 2024-11-18T02:30:45,254 WARN [IPC Server handler 4 on default port 43001 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1072 for block blk_1073741834_1010 2024-11-18T02:30:45,254 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta after 0ms 2024-11-18T02:30:45,256 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/1ab80c68206c4b57a18e5f03d280e9d7 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/1ab80c68206c4b57a18e5f03d280e9d7 2024-11-18T02:30:45,257 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/963afc47f19341d08c3a05b817f13cd5 to hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/info/963afc47f19341d08c3a05b817f13cd5 2024-11-18T02:30:45,257 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c4730a2bacf8:36945 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T02:30:45,257 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cad0e402632e476991567cfb9e15ddb6=10347, 076c2331091945ae859cbb3a01e27576=12506, 085911291a19419f8711e1835e2af862=17994, 5c2b97e8cf994cb098a2e91515069bad=6027, 1ab80c68206c4b57a18e5f03d280e9d7=6027, 963afc47f19341d08c3a05b817f13cd5=11421] 2024-11-18T02:30:45,260 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43685:43685),(127.0.0.1/127.0.0.1:42207:42207)] 2024-11-18T02:30:45,260 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta is not closed yet, will try archiving it next time 2024-11-18T02:30:45,264 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/default/TestLogRolling-testLogRollOnDatanodeDeath/50432e567ed2d92a56c608107a1430aa/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-18T02:30:45,265 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:45,265 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 50432e567ed2d92a56c608107a1430aa: Waiting for close lock at 1731897045245Running coprocessor pre-close hooks at 1731897045245Disabling compacts and flushes for region at 1731897045245Disabling writes for close at 1731897045245Writing region close event to WAL at 1731897045261 (+16 ms)Running coprocessor post-close hooks at 1731897045265 (+4 ms)Closed at 1731897045265 2024-11-18T02:30:45,265 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa. 2024-11-18T02:30:45,277 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/info/62d35513767a4d849db088ef48a8ecd9 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731897008201.50432e567ed2d92a56c608107a1430aa./info:regioninfo/1731897008583/Put/seqid=0 2024-11-18T02:30:45,278 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,279 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741887_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK], DatanodeInfoWithStorage[127.0.0.1:41301,DS-651837fb-ad10-491d-9c4d-9d12fa23b0c8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:45,279 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741887_1073 2024-11-18T02:30:45,279 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:45,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741888_1074 (size=7089) 2024-11-18T02:30:45,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741888_1074 (size=7089) 2024-11-18T02:30:45,284 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/info/62d35513767a4d849db088ef48a8ecd9 2024-11-18T02:30:45,311 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/ns/7ea6c925d4f0489990e5561af110eb20 is 43, key is default/ns:d/1731897007991/Put/seqid=0 2024-11-18T02:30:45,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741889_1075 (size=5153) 2024-11-18T02:30:45,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741889_1075 (size=5153) 2024-11-18T02:30:45,317 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/ns/7ea6c925d4f0489990e5561af110eb20 2024-11-18T02:30:45,339 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/table/281d57fced0d45a292ed7b920e0e0cf0 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731897008594/Put/seqid=0 2024-11-18T02:30:45,341 WARN [Thread-1063 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:30:45,341 WARN [Thread-1063 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1456477421-172.17.0.2-1731897006175:blk_1073741890_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK], DatanodeInfoWithStorage[127.0.0.1:37435,DS-acc0b290-c1ad-41ab-9663-21766ef7c4bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK]) is bad. 2024-11-18T02:30:45,341 WARN [Thread-1063 {}] hdfs.DataStreamer(1850): Abandoning BP-1456477421-172.17.0.2-1731897006175:blk_1073741890_1076 2024-11-18T02:30:45,341 WARN [Thread-1063 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43115,DS-4ea6c4c4-7b2f-4b4e-bd70-c14238526c4a,DISK] 2024-11-18T02:30:45,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741891_1077 (size=5424) 2024-11-18T02:30:45,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741891_1077 (size=5424) 2024-11-18T02:30:45,347 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/table/281d57fced0d45a292ed7b920e0e0cf0 2024-11-18T02:30:45,354 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/info/62d35513767a4d849db088ef48a8ecd9 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/info/62d35513767a4d849db088ef48a8ecd9 2024-11-18T02:30:45,360 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/info/62d35513767a4d849db088ef48a8ecd9, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T02:30:45,361 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/ns/7ea6c925d4f0489990e5561af110eb20 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/ns/7ea6c925d4f0489990e5561af110eb20 
2024-11-18T02:30:45,367 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/ns/7ea6c925d4f0489990e5561af110eb20, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T02:30:45,368 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/.tmp/table/281d57fced0d45a292ed7b920e0e0cf0 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/table/281d57fced0d45a292ed7b920e0e0cf0 2024-11-18T02:30:45,373 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/table/281d57fced0d45a292ed7b920e0e0cf0, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T02:30:45,374 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=11, compaction requested=false 2024-11-18T02:30:45,381 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T02:30:45,381 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:30:45,381 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:45,382 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897045246Running coprocessor pre-close hooks at 1731897045246Disabling compacts and flushes for region at 1731897045246Disabling writes for close at 1731897045246Obtaining lock to block concurrent updates at 1731897045246Preparing flush snapshotting stores in 1588230740 at 1731897045246Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731897045247 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731897045261 (+14 ms)Flushing 1588230740/info: creating writer at 1731897045261Flushing 1588230740/info: appending metadata at 1731897045276 (+15 ms)Flushing 1588230740/info: closing flushed file at 1731897045276Flushing 1588230740/ns: creating writer at 1731897045291 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731897045311 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731897045311Flushing 1588230740/table: creating writer at 1731897045323 (+12 ms)Flushing 1588230740/table: appending metadata at 1731897045338 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731897045338Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39e544e8: reopening flushed file at 
1731897045353 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e8784e2: reopening flushed file at 1731897045360 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c6da4e2: reopening flushed file at 1731897045367 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=11, compaction requested=false at 1731897045375 (+8 ms)Writing region close event to WAL at 1731897045377 (+2 ms)Running coprocessor post-close hooks at 1731897045381 (+4 ms)Closed at 1731897045381 2024-11-18T02:30:45,382 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:45,387 INFO [regionserver/c4730a2bacf8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T02:30:45,388 INFO [regionserver/c4730a2bacf8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:30:45,388 INFO [regionserver/c4730a2bacf8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T02:30:45,446 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(976): stopping server c4730a2bacf8,42723,1731897006974; all regions closed. 2024-11-18T02:30:45,446 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,447 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,447 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,447 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,447 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:45,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741886_1071 (size=825) 2024-11-18T02:30:45,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741886_1071 (size=825) 2024-11-18T02:30:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741862_1045 (size=13591) 2024-11-18T02:30:46,160 INFO [regionserver/c4730a2bacf8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T02:30:46,160 INFO [regionserver/c4730a2bacf8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T02:30:46,164 INFO [regionserver/c4730a2bacf8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:30:46,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741836_1012 (size=76) 2024-11-18T02:30:46,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:30:47,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:30:47,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:30:47,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T02:30:47,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:30:47,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:30:48,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:30:48,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:30:48,572 INFO [master/c4730a2bacf8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T02:30:48,572 INFO [master/c4730a2bacf8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T02:30:49,249 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 after 4002ms 2024-11-18T02:30:49,255 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta after 4001ms 2024-11-18T02:30:49,497 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2610f761 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1456477421-172.17.0.2-1731897006175:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:44805,null,null]) java.net.ConnectException: Call From c4730a2bacf8/172.17.0.2 to localhost:33871 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T02:30:50,247 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T02:30:50,249 DEBUG [RS:1;c4730a2bacf8:33905 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs 2024-11-18T02:30:50,249 INFO [RS:1;c4730a2bacf8:33905 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C33905%2C1731897008067:(num 1731897008303) 2024-11-18T02:30:50,249 DEBUG [RS:1;c4730a2bacf8:33905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:50,249 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:30:50,249 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:30:50,250 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.ChoreService(370): Chore service for: regionserver/c4730a2bacf8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T02:30:50,250 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-18T02:30:50,250 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T02:30:50,250 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T02:30:50,250 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T02:30:50,250 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:30:50,250 INFO [RS:1;c4730a2bacf8:33905 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33905 2024-11-18T02:30:50,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:30:50,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c4730a2bacf8,33905,1731897008067 2024-11-18T02:30:50,252 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:30:50,254 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c4730a2bacf8,33905,1731897008067] 2024-11-18T02:30:50,255 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c4730a2bacf8,33905,1731897008067 already deleted, retry=false 2024-11-18T02:30:50,255 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c4730a2bacf8,33905,1731897008067 expired; onlineServers=1 2024-11-18T02:30:50,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:50,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:50,354 INFO [RS:1;c4730a2bacf8:33905 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:30:50,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33905-0x10128e905af0002, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:50,354 INFO [RS:1;c4730a2bacf8:33905 {}] regionserver.HRegionServer(1031): Exiting; stopping=c4730a2bacf8,33905,1731897008067; zookeeper connection closed. 2024-11-18T02:30:50,355 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2c946954 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2c946954 2024-11-18T02:30:50,447 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T02:30:50,451 DEBUG [RS:0;c4730a2bacf8:42723 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs 2024-11-18T02:30:50,451 INFO [RS:0;c4730a2bacf8:42723 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C42723%2C1731897006974.meta:.meta(num 1731897045247) 2024-11-18T02:30:50,451 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,451 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,452 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,452 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,452 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741881_1065 (size=16308) 2024-11-18T02:30:50,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741881_1065 (size=16308) 2024-11-18T02:30:50,457 DEBUG [RS:0;c4730a2bacf8:42723 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/oldWALs 2024-11-18T02:30:50,457 INFO [RS:0;c4730a2bacf8:42723 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C42723%2C1731897006974:(num 1731897044804) 2024-11-18T02:30:50,457 DEBUG [RS:0;c4730a2bacf8:42723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:50,457 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:30:50,457 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:30:50,457 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.ChoreService(370): Chore service for: regionserver/c4730a2bacf8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T02:30:50,457 INFO [RS:0;c4730a2bacf8:42723 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:30:50,457 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T02:30:50,457 INFO [RS:0;c4730a2bacf8:42723 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42723 2024-11-18T02:30:50,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c4730a2bacf8,42723,1731897006974 2024-11-18T02:30:50,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:30:50,461 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:30:50,462 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c4730a2bacf8,42723,1731897006974] 2024-11-18T02:30:50,464 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c4730a2bacf8,42723,1731897006974 already deleted, retry=false 2024-11-18T02:30:50,464 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c4730a2bacf8,42723,1731897006974 expired; onlineServers=0 2024-11-18T02:30:50,464 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c4730a2bacf8,36945,1731897006897' ***** 2024-11-18T02:30:50,464 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T02:30:50,464 INFO [M:0;c4730a2bacf8:36945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:30:50,464 INFO [M:0;c4730a2bacf8:36945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:30:50,464 DEBUG [M:0;c4730a2bacf8:36945 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T02:30:50,465 DEBUG [M:0;c4730a2bacf8:36945 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T02:30:50,465 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T02:30:50,465 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897007288 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897007288,5,FailOnTimeoutGroup] 2024-11-18T02:30:50,465 INFO [M:0;c4730a2bacf8:36945 {}] hbase.ChoreService(370): Chore service for: master/c4730a2bacf8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T02:30:50,465 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897007291 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897007291,5,FailOnTimeoutGroup] 2024-11-18T02:30:50,465 INFO [M:0;c4730a2bacf8:36945 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:30:50,465 DEBUG [M:0;c4730a2bacf8:36945 {}] master.HMaster(1795): Stopping service threads 2024-11-18T02:30:50,465 INFO [M:0;c4730a2bacf8:36945 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T02:30:50,465 INFO [M:0;c4730a2bacf8:36945 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:30:50,466 INFO [M:0;c4730a2bacf8:36945 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T02:30:50,466 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T02:30:50,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T02:30:50,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:50,466 DEBUG [M:0;c4730a2bacf8:36945 {}] zookeeper.ZKUtil(347): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T02:30:50,467 WARN [M:0;c4730a2bacf8:36945 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T02:30:50,467 INFO [M:0;c4730a2bacf8:36945 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/.lastflushedseqids 2024-11-18T02:30:50,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741892_1078 (size=130) 2024-11-18T02:30:50,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741892_1078 (size=130) 2024-11-18T02:30:50,474 INFO [M:0;c4730a2bacf8:36945 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T02:30:50,474 INFO [M:0;c4730a2bacf8:36945 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T02:30:50,474 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:30:50,474 INFO [M:0;c4730a2bacf8:36945 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:50,474 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:50,474 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:30:50,474 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:50,474 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-18T02:30:50,492 DEBUG [M:0;c4730a2bacf8:36945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8a641faf7e146a2a539831fe062d02a is 82, key is hbase:meta,,1/info:regioninfo/1731897007969/Put/seqid=0 2024-11-18T02:30:50,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741893_1079 (size=5672) 2024-11-18T02:30:50,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741893_1079 (size=5672) 2024-11-18T02:30:50,497 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8a641faf7e146a2a539831fe062d02a 2024-11-18T02:30:50,519 DEBUG [M:0;c4730a2bacf8:36945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74ed68ad58de4210b706d6835b23f706 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731897008599/Put/seqid=0 2024-11-18T02:30:50,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741894_1080 (size=6255) 2024-11-18T02:30:50,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741894_1080 (size=6255) 2024-11-18T02:30:50,526 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74ed68ad58de4210b706d6835b23f706 2024-11-18T02:30:50,531 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 74ed68ad58de4210b706d6835b23f706 2024-11-18T02:30:50,546 DEBUG [M:0;c4730a2bacf8:36945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4234526aec324b06b528512b09614cb7 is 69, key is c4730a2bacf8,33905,1731897008067/rs:state/1731897008136/Put/seqid=0 2024-11-18T02:30:50,551 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741895_1081 (size=5224) 2024-11-18T02:30:50,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741895_1081 (size=5224) 2024-11-18T02:30:50,552 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4234526aec324b06b528512b09614cb7 2024-11-18T02:30:50,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:50,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42723-0x10128e905af0001, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:50,563 INFO [RS:0;c4730a2bacf8:42723 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:30:50,563 INFO [RS:0;c4730a2bacf8:42723 {}] regionserver.HRegionServer(1031): Exiting; stopping=c4730a2bacf8,42723,1731897006974; zookeeper connection closed. 2024-11-18T02:30:50,563 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52949f52 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52949f52 2024-11-18T02:30:50,563 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-18T02:30:50,571 DEBUG [M:0;c4730a2bacf8:36945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b7bce7909394096a22dc318ead12d0f is 52, key is load_balancer_on/state:d/1731897008043/Put/seqid=0 2024-11-18T02:30:50,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741896_1082 (size=5056) 2024-11-18T02:30:50,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741896_1082 (size=5056) 2024-11-18T02:30:50,576 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b7bce7909394096a22dc318ead12d0f 2024-11-18T02:30:50,581 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8a641faf7e146a2a539831fe062d02a as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a8a641faf7e146a2a539831fe062d02a 2024-11-18T02:30:50,586 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a8a641faf7e146a2a539831fe062d02a, entries=8, sequenceid=60, filesize=5.5 K 2024-11-18T02:30:50,587 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74ed68ad58de4210b706d6835b23f706 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/74ed68ad58de4210b706d6835b23f706 2024-11-18T02:30:50,591 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 74ed68ad58de4210b706d6835b23f706 2024-11-18T02:30:50,591 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/74ed68ad58de4210b706d6835b23f706, entries=6, sequenceid=60, filesize=6.1 K 2024-11-18T02:30:50,592 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4234526aec324b06b528512b09614cb7 as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4234526aec324b06b528512b09614cb7 2024-11-18T02:30:50,597 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4234526aec324b06b528512b09614cb7, entries=2, sequenceid=60, filesize=5.1 K 2024-11-18T02:30:50,598 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b7bce7909394096a22dc318ead12d0f as hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2b7bce7909394096a22dc318ead12d0f 2024-11-18T02:30:50,603 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2b7bce7909394096a22dc318ead12d0f, entries=1, sequenceid=60, filesize=4.9 K 2024-11-18T02:30:50,604 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=60, compaction requested=false 2024-11-18T02:30:50,606 INFO [M:0;c4730a2bacf8:36945 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T02:30:50,606 DEBUG [M:0;c4730a2bacf8:36945 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897050474Disabling compacts and flushes for region at 1731897050474Disabling writes for close at 1731897050474Obtaining lock to block concurrent updates at 1731897050474Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731897050474Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731897050475 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731897050475Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731897050476 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731897050491 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731897050491Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731897050503 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731897050519 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731897050519Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731897050531 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731897050546 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731897050546Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731897050557 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731897050570 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731897050571 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@214c5361: reopening flushed file at 1731897050581 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@548e447d: reopening flushed file at 1731897050586 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38487c9e: reopening flushed file at 1731897050591 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@579305a5: reopening flushed file at 1731897050597 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=60, compaction requested=false at 1731897050604 (+7 ms)Writing region close event to WAL at 1731897050606 (+2 ms)Closed at 1731897050606 2024-11-18T02:30:50,606 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,606 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,606 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,607 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,607 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:30:50,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41301 is added to blk_1073741879_1062 (size=1045) 2024-11-18T02:30:50,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37435 is added to blk_1073741879_1062 (size=1045) 2024-11-18T02:30:50,609 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T02:30:50,609 INFO [M:0;c4730a2bacf8:36945 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T02:30:50,609 INFO [M:0;c4730a2bacf8:36945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36945 2024-11-18T02:30:50,610 INFO [M:0;c4730a2bacf8:36945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:30:50,713 INFO [M:0;c4730a2bacf8:36945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:30:50,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:50,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36945-0x10128e905af0000, quorum=127.0.0.1:55099, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:30:50,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3bac5d7c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:50,716 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ecaf507{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:50,716 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:50,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79e92ca8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:50,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c9b51ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:50,718 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T02:30:50,718 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:50,718 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:50,718 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1456477421-172.17.0.2-1731897006175 (Datanode Uuid 35b9dbaa-4267-42f2-98b7-389d3e5f4a4e) service to localhost/127.0.0.1:43001 2024-11-18T02:30:50,718 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@43583b6a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:44805,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33871 , LocalHost:localPort c4730a2bacf8/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T02:30:50,718 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@43583b6a {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1456477421-172.17.0.2-1731897006175:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41301,null,null], DatanodeInfoWithStorage[127.0.0.1:44805,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1456477421-172.17.0.2-1731897006175 2024-11-18T02:30:50,719 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data3/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:50,719 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@43583b6a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44805,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1456477421-172.17.0.2-1731897006175 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:50,719 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@43583b6a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41301,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1456477421-172.17.0.2-1731897006175 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:30:50,719 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data4/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:50,719 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@43583b6a {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:44805,null,null], DatanodeInfoWithStorage[127.0.0.1:41301,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1456477421-172.17.0.2-1731897006175:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:44805,null,null], DatanodeInfoWithStorage[127.0.0.1:41301,null,null]] 2024-11-18T02:30:50,719 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:50,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@92842f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:50,722 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a677b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:50,722 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:50,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e96eece{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:50,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eebeea4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:50,723 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:30:50,723 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:30:50,723 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:30:50,723 WARN [BP-1456477421-172.17.0.2-1731897006175 heartbeating to localhost/127.0.0.1:43001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1456477421-172.17.0.2-1731897006175 (Datanode Uuid 9a7121b3-2bd2-43d9-97a2-7e69ac76ec76) service to localhost/127.0.0.1:43001 2024-11-18T02:30:50,724 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data9/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:50,724 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/cluster_43c8636e-0683-7910-5f1c-69bc705b000c/data/data10/current/BP-1456477421-172.17.0.2-1731897006175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:30:50,724 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:30:50,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10fae299{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:30:50,730 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ab5393f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:30:50,730 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:30:50,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67cf8368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:30:50,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5493a194{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir/,STOPPED} 2024-11-18T02:30:50,741 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T02:30:50,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T02:30:50,777 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 81) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43001 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43001 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:43001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f0a0cbef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:33521 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:43001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f0a0cbef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins@localhost:43001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33521 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43001 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43001 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 405) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=158 (was 203), ProcessCount=11 (was 11), AvailableMemoryMB=3080 (was 3038) - AvailableMemoryMB LEAK? - 2024-11-18T02:30:50,784 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=158, ProcessCount=11, AvailableMemoryMB=3080 2024-11-18T02:30:50,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T02:30:50,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.log.dir so I do NOT create it in target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784 2024-11-18T02:30:50,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7dd6b972-eb63-07da-5da1-a3c0803d0fde/hadoop.tmp.dir so I do NOT create it in target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784 2024-11-18T02:30:50,784 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136, deleteOnExit=true 2024-11-18T02:30:50,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/test.cache.data in system properties and HBase conf 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir in system properties and HBase conf 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T02:30:50,785 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:30:50,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/nfs.dump.dir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/java.io.tmpdir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T02:30:50,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T02:30:50,788 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T02:30:50,799 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:30:50,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:50,886 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:50,893 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:50,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:50,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:50,894 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:30:50,897 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:50,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e179503{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:50,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@482b9b0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:51,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45628471{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/java.io.tmpdir/jetty-localhost-45585-hadoop-hdfs-3_4_1-tests_jar-_-any-6565917518173199028/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:30:51,013 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@371f8296{HTTP/1.1, (http/1.1)}{localhost:45585} 2024-11-18T02:30:51,013 INFO [Time-limited test {}] server.Server(415): Started @149469ms 2024-11-18T02:30:51,026 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:30:51,097 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:51,101 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:51,101 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:51,101 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:51,101 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:30:51,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43a454f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:51,102 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70df7796{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:51,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39a85688{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/java.io.tmpdir/jetty-localhost-37259-hadoop-hdfs-3_4_1-tests_jar-_-any-13569637168452913895/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:51,216 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a011456{HTTP/1.1, (http/1.1)}{localhost:37259} 2024-11-18T02:30:51,217 INFO [Time-limited test {}] server.Server(415): Started @149673ms 2024-11-18T02:30:51,218 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:30:51,247 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:30:51,252 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:30:51,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:30:51,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:30:51,253 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:30:51,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3192c1d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:30:51,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cfd21d9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:30:51,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:30:51,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:51,320 WARN [Thread-1186 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data2/current/BP-568169947-172.17.0.2-1731897050829/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:51,320 WARN [Thread-1185 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data1/current/BP-568169947-172.17.0.2-1731897050829/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:51,336 WARN [Thread-1164 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:30:51,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa0f7c0b09b24ae49 with lease ID 0x5fe9e8a464d50c10: Processing first storage report for DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b from datanode DatanodeRegistration(127.0.0.1:36451, datanodeUuid=9b45d3d3-3d89-43d5-9941-e1bd1904a9c3, infoPort=45007, infoSecurePort=0, ipcPort=32817, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829) 2024-11-18T02:30:51,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa0f7c0b09b24ae49 with lease ID 0x5fe9e8a464d50c10: from storage DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b node DatanodeRegistration(127.0.0.1:36451, datanodeUuid=9b45d3d3-3d89-43d5-9941-e1bd1904a9c3, infoPort=45007, infoSecurePort=0, ipcPort=32817, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:51,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa0f7c0b09b24ae49 with lease ID 0x5fe9e8a464d50c10: Processing first storage report for DS-ef7a441c-ba73-4da5-8b70-4d4390d9c317 from datanode DatanodeRegistration(127.0.0.1:36451, datanodeUuid=9b45d3d3-3d89-43d5-9941-e1bd1904a9c3, infoPort=45007, infoSecurePort=0, ipcPort=32817, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829) 2024-11-18T02:30:51,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa0f7c0b09b24ae49 with lease ID 0x5fe9e8a464d50c10: from storage DS-ef7a441c-ba73-4da5-8b70-4d4390d9c317 node DatanodeRegistration(127.0.0.1:36451, datanodeUuid=9b45d3d3-3d89-43d5-9941-e1bd1904a9c3, infoPort=45007, infoSecurePort=0, ipcPort=32817, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:51,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23accf28{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/java.io.tmpdir/jetty-localhost-36793-hadoop-hdfs-3_4_1-tests_jar-_-any-11063064256213183567/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:30:51,372 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34e43275{HTTP/1.1, (http/1.1)}{localhost:36793} 2024-11-18T02:30:51,372 INFO [Time-limited test {}] server.Server(415): Started @149829ms 2024-11-18T02:30:51,374 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
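
The start-up traced above (mini ZK, two datanodes registering their storage with the NameNode, Jetty endpoints for the NameNode and datanode web UIs) is the standard mini-cluster bring-up used by HBase tests, matching the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2} logged at the beginning of the test. Below is a minimal sketch of driving that same setup from a test; HBaseTestingUtil and StartMiniClusterOption are the names printed in the log, but the exact builder and method signatures shown here are assumptions and may differ between HBase branches.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Assumed API: class and option names are taken from the log above;
    // signatures may differ between HBase versions.
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // matches StartMiniClusterOption{numMasters=1, ...}
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    util.startMiniCluster(option); // brings up mini ZK, mini DFS and HBase, as traced above
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster(); // tears the cluster down and reports resource usage
    }
  }
}
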
2024-11-18T02:30:51,467 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data3/current/BP-568169947-172.17.0.2-1731897050829/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:51,467 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data4/current/BP-568169947-172.17.0.2-1731897050829/current, will proceed with Du for space computation calculation, 2024-11-18T02:30:51,490 WARN [Thread-1200 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:30:51,492 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69edfb596fdcf119 with lease ID 0x5fe9e8a464d50c11: Processing first storage report for DS-b1452ce0-ff81-474f-ab85-6834de112d24 from datanode DatanodeRegistration(127.0.0.1:40633, datanodeUuid=4b122838-ac19-41cc-872c-933de20bbf9f, infoPort=42425, infoSecurePort=0, ipcPort=33445, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829) 2024-11-18T02:30:51,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69edfb596fdcf119 with lease ID 0x5fe9e8a464d50c11: from storage DS-b1452ce0-ff81-474f-ab85-6834de112d24 node DatanodeRegistration(127.0.0.1:40633, datanodeUuid=4b122838-ac19-41cc-872c-933de20bbf9f, infoPort=42425, infoSecurePort=0, ipcPort=33445, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:51,492 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69edfb596fdcf119 with lease ID 0x5fe9e8a464d50c11: Processing first storage report for DS-b54e9ad4-024a-43f7-8b32-3691f5426c00 from datanode DatanodeRegistration(127.0.0.1:40633, datanodeUuid=4b122838-ac19-41cc-872c-933de20bbf9f, infoPort=42425, infoSecurePort=0, ipcPort=33445, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829) 2024-11-18T02:30:51,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69edfb596fdcf119 with lease ID 0x5fe9e8a464d50c11: from storage DS-b54e9ad4-024a-43f7-8b32-3691f5426c00 node DatanodeRegistration(127.0.0.1:40633, datanodeUuid=4b122838-ac19-41cc-872c-933de20bbf9f, infoPort=42425, infoSecurePort=0, ipcPort=33445, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:30:51,497 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784 2024-11-18T02:30:51,500 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/zookeeper_0, clientPort=59711, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T02:30:51,501 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59711 2024-11-18T02:30:51,501 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:51,502 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:51,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:30:51,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:30:51,511 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c with version=8 2024-11-18T02:30:51,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase-staging 2024-11-18T02:30:51,514 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:30:51,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:51,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:51,514 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:30:51,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:51,514 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:30:51,514 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T02:30:51,514 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:30:51,515 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40797 2024-11-18T02:30:51,516 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40797 connecting to ZooKeeper ensemble=127.0.0.1:59711 2024-11-18T02:30:51,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:407970x0, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:30:51,523 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40797-0x10128e9b3fb0000 connected 2024-11-18T02:30:51,539 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:51,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:51,543 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:51,543 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c, hbase.cluster.distributed=false 2024-11-18T02:30:51,545 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:30:51,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40797 2024-11-18T02:30:51,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40797 2024-11-18T02:30:51,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40797 2024-11-18T02:30:51,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40797 2024-11-18T02:30:51,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40797 2024-11-18T02:30:51,562 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:30:51,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:51,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:51,562 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:30:51,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:30:51,562 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:30:51,562 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T02:30:51,562 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:30:51,563 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33923 2024-11-18T02:30:51,564 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33923 connecting to ZooKeeper ensemble=127.0.0.1:59711 2024-11-18T02:30:51,565 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:51,566 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:51,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339230x0, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:30:51,572 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339230x0, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:30:51,572 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33923-0x10128e9b3fb0001 connected 2024-11-18T02:30:51,573 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T02:30:51,573 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T02:30:51,574 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T02:30:51,575 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:30:51,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33923 2024-11-18T02:30:51,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33923 2024-11-18T02:30:51,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33923 2024-11-18T02:30:51,576 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33923 2024-11-18T02:30:51,576 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33923 2024-11-18T02:30:51,587 
DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c4730a2bacf8:40797 2024-11-18T02:30:51,588 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:51,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:51,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:51,589 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T02:30:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,591 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T02:30:51,592 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c4730a2bacf8,40797,1731897051513 from backup master directory 2024-11-18T02:30:51,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:51,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:51,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:30:51,593 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
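
The ZKWatcher lines above are ZooKeeper watch notifications: each participant sets a watch on znodes such as /hbase/master and /hbase/backup-masters and reacts when nodes are created, deleted, or their children change (this is how the active-master handoff is coordinated). A minimal sketch of that watch-and-react pattern with the stock ZooKeeper client API follows; the quorum address and znode path are the ones visible in the log, everything else is illustrative.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Mirrors "Received ZooKeeper Event, type=NodeCreated, ..., path=/hbase/master"
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59711", 30000, watcher); // mini ZK port from the log
    zk.exists("/hbase/master", true); // set a watch on a znode that may not exist yet
    Thread.sleep(5000);               // toy wait so the watcher has a chance to fire
    zk.close();
  }
}
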
2024-11-18T02:30:51,593 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:51,597 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/hbase.id] with ID: 88be58f2-4c55-4b85-9bc0-3d904753f69e 2024-11-18T02:30:51,597 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/.tmp/hbase.id 2024-11-18T02:30:51,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:30:51,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:30:51,603 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/.tmp/hbase.id]:[hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/hbase.id] 2024-11-18T02:30:51,614 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:51,614 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T02:30:51,616 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
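
The FSUtils lines above describe a write-then-rename publish of the cluster ID: the file is first written under .tmp/hbase.id and then moved to its final location, so readers never observe a half-written file. A small sketch of that pattern using the plain Hadoop FileSystem API is below; the paths and the UUID payload are copied from the log purely for illustration.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:34129"); // NameNode address from the log
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
    Path target = new Path("/user/jenkins/test-data/hbase.id");

    // 1. Write the content to a temporary file first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("88be58f2-4c55-4b85-9bc0-3d904753f69e".getBytes(StandardCharsets.UTF_8));
    }
    // 2. Move it to its final location so readers only ever see the complete file.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}
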
2024-11-18T02:30:51,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:30:51,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:30:51,628 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:30:51,629 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T02:30:51,629 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:51,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:30:51,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:30:51,637 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store 2024-11-18T02:30:51,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:30:51,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:30:51,645 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:51,645 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:30:51,645 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:51,645 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:51,645 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:30:51,645 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:30:51,645 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
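
The MasterRegion lines above print the full descriptor of the local 'master:store' region, including the 'info' family (ROW_INDEX_V1 encoding, ROWCOL bloom filter, 3 versions, in-memory, 8 KB blocks). The sketch below rebuilds just that family with the public descriptor-builder API to show how those logged attributes map to code; it is illustrative only and does not touch the real system region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // The 'info' family as logged: ROW_INDEX_V1, ROWCOL bloom, 3 versions, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(td); // prints attributes in the same form as the logged descriptor
  }
}
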
2024-11-18T02:30:51,645 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897051645Disabling compacts and flushes for region at 1731897051645Disabling writes for close at 1731897051645Writing region close event to WAL at 1731897051645Closed at 1731897051645 2024-11-18T02:30:51,646 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/.initializing 2024-11-18T02:30:51,646 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:51,648 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C40797%2C1731897051513, suffix=, logDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513, archiveDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/oldWALs, maxLogs=10 2024-11-18T02:30:51,649 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C40797%2C1731897051513.1731897051649 2024-11-18T02:30:51,653 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 2024-11-18T02:30:51,654 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42425:42425),(127.0.0.1/127.0.0.1:45007:45007)] 2024-11-18T02:30:51,655 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:30:51,655 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:51,655 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,655 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,656 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,657 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T02:30:51,657 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:51,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T02:30:51,659 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:51,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T02:30:51,661 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:51,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,662 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T02:30:51,662 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,663 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:51,663 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,663 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,664 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,665 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,665 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,665 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T02:30:51,666 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:30:51,668 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:30:51,669 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848207, jitterRate=0.07855100929737091}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T02:30:51,670 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731897051655Initializing all the Stores at 1731897051656 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897051656Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897051656Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897051656Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897051656Cleaning up temporary data from old regions at 1731897051665 (+9 ms)Region opened successfully at 1731897051670 (+5 ms) 2024-11-18T02:30:51,672 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T02:30:51,676 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64117eeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:30:51,677 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T02:30:51,677 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T02:30:51,677 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T02:30:51,677 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T02:30:51,677 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T02:30:51,678 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T02:30:51,678 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T02:30:51,680 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T02:30:51,681 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T02:30:51,682 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T02:30:51,682 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T02:30:51,683 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T02:30:51,684 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T02:30:51,684 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T02:30:51,685 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T02:30:51,686 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T02:30:51,687 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T02:30:51,689 DEBUG 
[master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T02:30:51,691 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T02:30:51,693 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T02:30:51,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:51,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:30:51,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,695 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c4730a2bacf8,40797,1731897051513, sessionid=0x10128e9b3fb0000, setting cluster-up flag (Was=false) 2024-11-18T02:30:51,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,703 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T02:30:51,704 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:51,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:51,715 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T02:30:51,716 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:51,717 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T02:30:51,719 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:51,719 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T02:30:51,719 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T02:30:51,719 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c4730a2bacf8,40797,1731897051513 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T02:30:51,720 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:51,720 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:51,720 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:51,721 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:30:51,721 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c4730a2bacf8:0, corePoolSize=10, maxPoolSize=10 2024-11-18T02:30:51,721 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,721 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:30:51,721 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T02:30:51,722 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:51,722 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T02:30:51,723 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,724 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T02:30:51,725 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731897081725 2024-11-18T02:30:51,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T02:30:51,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T02:30:51,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T02:30:51,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T02:30:51,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T02:30:51,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T02:30:51,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,726 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T02:30:51,727 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T02:30:51,727 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T02:30:51,727 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T02:30:51,727 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T02:30:51,727 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897051727,5,FailOnTimeoutGroup] 2024-11-18T02:30:51,728 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897051727,5,FailOnTimeoutGroup] 2024-11-18T02:30:51,728 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,728 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T02:30:51,728 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,728 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
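The ScheduledChore entries above report their periods as raw milliseconds (600000 for LogsCleaner and HFileCleaner, 43200000 for ReplicationBarrierCleaner, 1800000 for SnapshotCleaner). A minimal reader-side sketch, plain Java rather than HBase code, with the chore names and values copied from the entries above, for turning those periods into ISO-8601 durations:

```java
import java.time.Duration;
import java.util.Map;

// Reader-side helper: convert the chore periods logged above (milliseconds)
// into ISO-8601 durations. Names and values are copied from the log entries.
public class ChorePeriods {
    public static void main(String[] args) {
        Map<String, Long> periodsMs = Map.of(
                "LogsCleaner", 600_000L,                  // 10 minutes
                "HFileCleaner", 600_000L,                 // 10 minutes
                "ReplicationBarrierCleaner", 43_200_000L, // 12 hours
                "SnapshotCleaner", 1_800_000L);           // 30 minutes
        periodsMs.forEach((name, ms) ->
                System.out.println(name + " runs every " + Duration.ofMillis(ms)));
    }
}
```

Running it prints PT10M for the two cleaners, PT12H for ReplicationBarrierCleaner, and PT30M for SnapshotCleaner.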
2024-11-18T02:30:51,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:30:51,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:30:51,733 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T02:30:51,733 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c 2024-11-18T02:30:51,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:30:51,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:30:51,741 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:51,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:30:51,743 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:30:51,743 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:51,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:30:51,745 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:30:51,745 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:51,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:30:51,747 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:30:51,747 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:51,747 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:30:51,748 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:30:51,749 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:51,749 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:51,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:30:51,750 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740 2024-11-18T02:30:51,750 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740 2024-11-18T02:30:51,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:30:51,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:30:51,752 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
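The FlushLargeStoresPolicy entries in this run (32.0 M for the master:store region earlier, 16.0 M for hbase:meta in the preceding entry) fall back to region.getMemStoreFlushHeapSize divided by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, as the messages themselves state. A small sketch of that arithmetic using values reported above; the 64 MiB flush size for hbase:meta is inferred from the 16 MiB result and its four families rather than logged directly:

```java
// Reader-side check of the per-family flush lower bound reported by
// FlushLargeStoresPolicy: region memstore flush size / number of column families.
public class FlushLowerBound {
    static long lowerBound(long memstoreFlushSizeBytes, int families) {
        return memstoreFlushSizeBytes / families;
    }

    public static void main(String[] args) {
        // master:store: flushSize=134217728 (128 MiB) and 4 families
        // (info, proc, rs, state) -> 33554432 bytes (32 MiB), matching the
        // "(32.0 M)" and flushSizeLowerBound=33554432 entries above.
        System.out.println(lowerBound(134_217_728L, 4));
        // hbase:meta: 4 families (info, ns, rep_barrier, table); a 16 MiB result
        // implies a 64 MiB flush size (inferred, not logged explicitly here).
        System.out.println(lowerBound(67_108_864L, 4));
    }
}
```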
2024-11-18T02:30:51,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:30:51,754 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:30:51,755 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695567, jitterRate=-0.11554116010665894}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:30:51,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731897051741Initializing all the Stores at 1731897051742 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897051742Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897051742Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897051742Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897051742Cleaning up temporary data from old regions at 1731897051751 (+9 ms)Region opened successfully at 1731897051756 (+5 ms) 2024-11-18T02:30:51,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:30:51,756 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:30:51,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:30:51,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:30:51,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:30:51,756 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:30:51,756 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897051756Disabling compacts and flushes for region at 1731897051756Disabling writes for close at 1731897051756Writing region close 
event to WAL at 1731897051756Closed at 1731897051756 2024-11-18T02:30:51,758 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:51,758 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T02:30:51,758 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T02:30:51,759 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:30:51,760 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T02:30:51,778 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(746): ClusterId : 88be58f2-4c55-4b85-9bc0-3d904753f69e 2024-11-18T02:30:51,778 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T02:30:51,780 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T02:30:51,780 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T02:30:51,782 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T02:30:51,782 DEBUG [RS:0;c4730a2bacf8:33923 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f52f4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:30:51,794 DEBUG [RS:0;c4730a2bacf8:33923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c4730a2bacf8:33923 2024-11-18T02:30:51,794 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T02:30:51,794 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T02:30:51,794 DEBUG [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(832): About to register with Master. 
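The region open and close journals above record each step as epoch milliseconds (for example "Writing region info on filesystem at 1731897051741"), while the surrounding lines use formatted timestamps. A small java.time sketch for cross-checking the two; UTC is assumed for the log's clock:

```java
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

// Convert an epoch-millisecond step timestamp from the region open journal
// into the same format as the surrounding log lines (UTC assumed).
public class JournalTimestamp {
    public static void main(String[] args) {
        // From "Writing region info on filesystem at 1731897051741" for region 1588230740.
        long epochMillis = 1_731_897_051_741L;
        DateTimeFormatter fmt = DateTimeFormatter
                .ofPattern("yyyy-MM-dd'T'HH:mm:ss,SSS")
                .withZone(ZoneOffset.UTC);
        // Prints 2024-11-18T02:30:51,741 which lines up with the wall-clock
        // timestamp on the corresponding "Instantiated hbase:meta,,1.1588230740" entry.
        System.out.println(fmt.format(Instant.ofEpochMilli(epochMillis)));
    }
}
```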
2024-11-18T02:30:51,795 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(2659): reportForDuty to master=c4730a2bacf8,40797,1731897051513 with port=33923, startcode=1731897051562 2024-11-18T02:30:51,795 DEBUG [RS:0;c4730a2bacf8:33923 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T02:30:51,797 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37861, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T02:30:51,797 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40797 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:51,797 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40797 {}] master.ServerManager(517): Registering regionserver=c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:51,799 DEBUG [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c 2024-11-18T02:30:51,799 DEBUG [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34129 2024-11-18T02:30:51,799 DEBUG [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T02:30:51,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:30:51,801 DEBUG [RS:0;c4730a2bacf8:33923 {}] zookeeper.ZKUtil(111): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:51,801 WARN [RS:0;c4730a2bacf8:33923 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:30:51,801 INFO [RS:0;c4730a2bacf8:33923 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:30:51,801 DEBUG [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:51,802 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c4730a2bacf8,33923,1731897051562] 2024-11-18T02:30:51,805 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T02:30:51,806 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T02:30:51,808 INFO [RS:0;c4730a2bacf8:33923 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:30:51,808 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
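The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M. The low mark is consistent with scaling the global limit by a 0.95 lower-limit ratio; the ratio itself is an assumption here, since the log only shows the resulting sizes. A quick check:

```java
// Reader-side arithmetic check on the MemStoreFlusher entry above: the logged
// low-water mark (836 M) equals the global limit (880 M) scaled by an assumed
// 0.95 lower-limit ratio (assumption -- the log reports only the sizes).
public class MemStoreLowMark {
    public static void main(String[] args) {
        long globalLimitMb = 880;                  // globalMemStoreLimit=880 M
        long lowMarkMb = globalLimitMb * 95 / 100; // 0.95 ratio, integer math
        System.out.println(lowMarkMb);             // 836, matching globalMemStoreLimitLowMark=836 M
    }
}
```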
2024-11-18T02:30:51,808 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T02:30:51,809 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T02:30:51,809 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:30:51,809 DEBUG [RS:0;c4730a2bacf8:33923 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:30:51,812 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T02:30:51,812 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,812 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,812 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,812 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,812 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33923,1731897051562-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:30:51,827 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T02:30:51,827 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33923,1731897051562-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,827 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,827 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.Replication(171): c4730a2bacf8,33923,1731897051562 started 2024-11-18T02:30:51,842 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:51,842 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1482): Serving as c4730a2bacf8,33923,1731897051562, RpcServer on c4730a2bacf8/172.17.0.2:33923, sessionid=0x10128e9b3fb0001 2024-11-18T02:30:51,842 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T02:30:51,842 DEBUG [RS:0;c4730a2bacf8:33923 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:51,842 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,33923,1731897051562' 2024-11-18T02:30:51,842 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T02:30:51,843 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T02:30:51,843 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T02:30:51,843 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T02:30:51,843 DEBUG [RS:0;c4730a2bacf8:33923 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:51,843 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,33923,1731897051562' 2024-11-18T02:30:51,843 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T02:30:51,843 DEBUG 
[RS:0;c4730a2bacf8:33923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T02:30:51,844 DEBUG [RS:0;c4730a2bacf8:33923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T02:30:51,844 INFO [RS:0;c4730a2bacf8:33923 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T02:30:51,844 INFO [RS:0;c4730a2bacf8:33923 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T02:30:51,910 WARN [c4730a2bacf8:40797 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T02:30:51,946 INFO [RS:0;c4730a2bacf8:33923 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C33923%2C1731897051562, suffix=, logDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562, archiveDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/oldWALs, maxLogs=32 2024-11-18T02:30:51,947 INFO [RS:0;c4730a2bacf8:33923 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33923%2C1731897051562.1731897051947 2024-11-18T02:30:51,953 INFO [RS:0;c4730a2bacf8:33923 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 2024-11-18T02:30:51,954 DEBUG [RS:0;c4730a2bacf8:33923 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42425:42425),(127.0.0.1/127.0.0.1:45007:45007)] 2024-11-18T02:30:52,160 DEBUG [c4730a2bacf8:40797 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T02:30:52,161 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:52,162 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,33923,1731897051562, state=OPENING 2024-11-18T02:30:52,166 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T02:30:52,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:52,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:30:52,168 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:30:52,168 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:52,168 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:52,168 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,33923,1731897051562}]
2024-11-18T02:30:52,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T02:30:52,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T02:30:52,321 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-18T02:30:52,323 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35257, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-18T02:30:52,327 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-18T02:30:52,327 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-18T02:30:52,329 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C33923%2C1731897051562.meta, suffix=.meta, logDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562, archiveDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/oldWALs, maxLogs=32
2024-11-18T02:30:52,329 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta
2024-11-18T02:30:52,334 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta
2024-11-18T02:30:52,335 DEBUG
[RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42425:42425),(127.0.0.1/127.0.0.1:45007:45007)] 2024-11-18T02:30:52,336 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:30:52,336 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T02:30:52,336 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T02:30:52,336 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-18T02:30:52,336 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T02:30:52,337 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:52,337 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T02:30:52,337 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T02:30:52,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:30:52,339 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:30:52,339 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:52,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:52,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:30:52,340 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:30:52,340 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:52,341 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:52,341 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:30:52,341 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:30:52,341 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:52,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:52,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:30:52,343 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:30:52,343 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:52,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:30:52,343 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:30:52,344 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740 2024-11-18T02:30:52,345 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740 2024-11-18T02:30:52,346 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:30:52,346 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:30:52,347 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T02:30:52,348 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:30:52,349 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880572, jitterRate=0.11970610916614532}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:30:52,349 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T02:30:52,350 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731897052337Writing region info on filesystem at 1731897052337Initializing all the Stores at 1731897052338 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897052338Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897052338Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897052338Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897052338Cleaning up temporary data from old regions at 1731897052346 (+8 ms)Running coprocessor post-open hooks at 1731897052349 (+3 ms)Region opened successfully at 1731897052349 2024-11-18T02:30:52,351 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731897052321 2024-11-18T02:30:52,353 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T02:30:52,353 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T02:30:52,354 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:52,354 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,33923,1731897051562, state=OPEN 2024-11-18T02:30:52,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:30:52,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:30:52,358 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:52,358 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:52,358 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:30:52,361 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T02:30:52,361 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,33923,1731897051562 in 190 msec 2024-11-18T02:30:52,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T02:30:52,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-18T02:30:52,365 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:30:52,365 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T02:30:52,366 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:30:52,366 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,33923,1731897051562, seqNum=-1] 2024-11-18T02:30:52,367 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:30:52,368 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53117, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:30:52,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 654 msec 2024-11-18T02:30:52,374 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731897052374, completionTime=-1 2024-11-18T02:30:52,374 INFO 
[master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T02:30:52,374 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731897112376 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731897172376 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,40797,1731897051513-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,40797,1731897051513-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,40797,1731897051513-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c4730a2bacf8:40797, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:52,376 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:52,377 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:52,378 DEBUG [master/c4730a2bacf8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T02:30:52,380 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.787sec 2024-11-18T02:30:52,380 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T02:30:52,380 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T02:30:52,380 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T02:30:52,380 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-18T02:30:52,380 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T02:30:52,380 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,40797,1731897051513-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:30:52,380 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,40797,1731897051513-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T02:30:52,383 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T02:30:52,383 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T02:30:52,383 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,40797,1731897051513-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:30:52,478 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d692efe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:52,478 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c4730a2bacf8,40797,-1 for getting cluster id 2024-11-18T02:30:52,478 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T02:30:52,480 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '88be58f2-4c55-4b85-9bc0-3d904753f69e' 2024-11-18T02:30:52,480 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T02:30:52,481 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "88be58f2-4c55-4b85-9bc0-3d904753f69e" 2024-11-18T02:30:52,481 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ca8f0f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:52,481 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c4730a2bacf8,40797,-1] 2024-11-18T02:30:52,481 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T02:30:52,481 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:30:52,483 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55896, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T02:30:52,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43a7cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:30:52,484 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:30:52,485 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,33923,1731897051562, seqNum=-1] 2024-11-18T02:30:52,485 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:30:52,486 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36688, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:30:52,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:52,488 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:30:52,491 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T02:30:52,491 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-18T02:30:52,491 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-18T02:30:52,491 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T02:30:52,492 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is c4730a2bacf8,40797,1731897051513 2024-11-18T02:30:52,492 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@39251207 2024-11-18T02:30:52,492 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T02:30:52,494 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55904, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T02:30:52,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40797 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T02:30:52,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40797 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T02:30:52,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40797 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:30:52,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40797 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T02:30:52,497 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T02:30:52,497 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:52,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40797 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-18T02:30:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:30:52,498 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T02:30:52,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741835_1011 (size=395) 2024-11-18T02:30:52,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741835_1011 (size=395) 2024-11-18T02:30:52,507 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cba96910749987edda9cc2704dcab941, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c 2024-11-18T02:30:52,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40633 is added to blk_1073741836_1012 (size=78) 2024-11-18T02:30:52,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36451 is added to blk_1073741836_1012 (size=78) 2024-11-18T02:30:52,513 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:52,513 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing cba96910749987edda9cc2704dcab941, disabling compactions & flushes 2024-11-18T02:30:52,513 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:30:52,513 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:30:52,513 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. after waiting 0 ms 2024-11-18T02:30:52,513 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:30:52,514 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:30:52,514 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for cba96910749987edda9cc2704dcab941: Waiting for close lock at 1731897052513Disabling compacts and flushes for region at 1731897052513Disabling writes for close at 1731897052513Writing region close event to WAL at 1731897052514 (+1 ms)Closed at 1731897052514 2024-11-18T02:30:52,515 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T02:30:52,515 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731897052515"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731897052515"}]},"ts":"1731897052515"} 2024-11-18T02:30:52,517 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T02:30:52,518 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T02:30:52,519 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731897052519"}]},"ts":"1731897052519"} 2024-11-18T02:30:52,520 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-18T02:30:52,521 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cba96910749987edda9cc2704dcab941, ASSIGN}] 2024-11-18T02:30:52,522 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cba96910749987edda9cc2704dcab941, ASSIGN 2024-11-18T02:30:52,523 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cba96910749987edda9cc2704dcab941, ASSIGN; state=OFFLINE, location=c4730a2bacf8,33923,1731897051562; forceNewPlan=false, retain=false 2024-11-18T02:30:52,674 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cba96910749987edda9cc2704dcab941, regionState=OPENING, regionLocation=c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:52,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cba96910749987edda9cc2704dcab941, ASSIGN because future has completed 2024-11-18T02:30:52,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cba96910749987edda9cc2704dcab941, server=c4730a2bacf8,33923,1731897051562}] 2024-11-18T02:30:52,834 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 
2024-11-18T02:30:52,834 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cba96910749987edda9cc2704dcab941, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:30:52,834 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,834 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:30:52,834 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,834 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,836 INFO [StoreOpener-cba96910749987edda9cc2704dcab941-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,837 INFO [StoreOpener-cba96910749987edda9cc2704dcab941-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cba96910749987edda9cc2704dcab941 columnFamilyName info 2024-11-18T02:30:52,837 DEBUG [StoreOpener-cba96910749987edda9cc2704dcab941-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:30:52,837 INFO [StoreOpener-cba96910749987edda9cc2704dcab941-1 {}] regionserver.HStore(327): Store=cba96910749987edda9cc2704dcab941/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:30:52,838 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,838 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,838 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,839 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,839 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,840 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,842 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:30:52,843 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cba96910749987edda9cc2704dcab941; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743464, jitterRate=-0.054637372493743896}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T02:30:52,843 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cba96910749987edda9cc2704dcab941 2024-11-18T02:30:52,844 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cba96910749987edda9cc2704dcab941: Running coprocessor pre-open hook at 1731897052834Writing region info on filesystem at 1731897052834Initializing all the Stores at 1731897052835 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897052835Cleaning up temporary data from old regions at 1731897052839 (+4 ms)Running coprocessor post-open hooks at 1731897052843 (+4 ms)Region opened successfully at 1731897052843 2024-11-18T02:30:52,845 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941., pid=6, masterSystemTime=1731897052830 2024-11-18T02:30:52,847 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:30:52,847 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:30:52,848 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cba96910749987edda9cc2704dcab941, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,33923,1731897051562 2024-11-18T02:30:52,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cba96910749987edda9cc2704dcab941, server=c4730a2bacf8,33923,1731897051562 because future has completed 2024-11-18T02:30:52,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T02:30:52,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cba96910749987edda9cc2704dcab941, server=c4730a2bacf8,33923,1731897051562 in 175 msec 2024-11-18T02:30:52,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T02:30:52,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cba96910749987edda9cc2704dcab941, ASSIGN in 333 msec 2024-11-18T02:30:52,858 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T02:30:52,858 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731897052858"}]},"ts":"1731897052858"} 2024-11-18T02:30:52,860 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-18T02:30:52,861 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T02:30:52,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 367 msec 2024-11-18T02:30:53,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:53,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:54,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:54,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:55,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:55,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:56,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:56,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:57,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:57,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:57,838 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T02:30:57,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:30:57,865 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:30:57,865 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T02:30:57,865 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T02:30:57,865 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-18T02:30:57,866 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:30:57,866 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T02:30:57,866 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T02:30:57,867 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-18T02:30:58,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:58,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:30:59,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:30:59,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:00,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:00,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:01,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:01,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:02,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:02,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40797 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:31:02,554 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-18T02:31:02,554 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-18T02:31:02,557 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T02:31:02,557 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:31:02,560 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941., hostname=c4730a2bacf8,33923,1731897051562, seqNum=2] 2024-11-18T02:31:03,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:03,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:04,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:04,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:04,563 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 2024-11-18T02:31:04,563 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:04,563 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:31:04,564 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:04,564 WARN [DataStreamer for file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 block BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40633,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK], DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40633,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]) is bad. 2024-11-18T02:31:04,564 WARN [DataStreamer for file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 block BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40633,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK], DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40633,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]) is bad. 2024-11-18T02:31:04,564 WARN [DataStreamer for file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta block BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40633,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK], DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40633,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]) is bad. 2024-11-18T02:31:04,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:60978 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60978 dst: /127.0.0.1:40633 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:04,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1436565403_22 at /127.0.0.1:60950 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60950 dst: /127.0.0.1:40633 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:04,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1436565403_22 at /127.0.0.1:43750 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43750 dst: /127.0.0.1:36451 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:04,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:43782 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43782 dst: /127.0.0.1:36451 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:04,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:60988 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60988 dst: /127.0.0.1:40633 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:04,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:43798 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43798 dst: /127.0.0.1:36451 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:04,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23accf28{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:04,569 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34e43275{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:31:04,569 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:31:04,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cfd21d9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:31:04,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3192c1d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,STOPPED} 2024-11-18T02:31:04,570 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:31:04,570 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:31:04,570 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-568169947-172.17.0.2-1731897050829 (Datanode Uuid 4b122838-ac19-41cc-872c-933de20bbf9f) service to localhost/127.0.0.1:34129 2024-11-18T02:31:04,570 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:31:04,571 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data3/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:04,571 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data4/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:04,571 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:31:04,582 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:31:04,585 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:31:04,586 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:31:04,586 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:31:04,586 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:31:04,587 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61726e31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:31:04,587 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55b48cd0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:31:04,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f9084a1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/java.io.tmpdir/jetty-localhost-42375-hadoop-hdfs-3_4_1-tests_jar-_-any-15774843623913482170/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:04,705 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22dbe5ea{HTTP/1.1, 
(http/1.1)}{localhost:42375} 2024-11-18T02:31:04,705 INFO [Time-limited test {}] server.Server(415): Started @163161ms 2024-11-18T02:31:04,706 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:31:04,725 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:04,725 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:04,725 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:04,725 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:41196 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41196 dst: /127.0.0.1:36451 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:04,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:41200 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41200 dst: /127.0.0.1:36451 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:04,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1436565403_22 at /127.0.0.1:41212 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36451:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41212 dst: /127.0.0.1:36451 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:31:04,733 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39a85688{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:04,733 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a011456{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:31:04,734 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:31:04,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70df7796{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:31:04,734 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43a454f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,STOPPED} 2024-11-18T02:31:04,735 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:31:04,735 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T02:31:04,735 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:31:04,735 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-568169947-172.17.0.2-1731897050829 (Datanode Uuid 9b45d3d3-3d89-43d5-9941-e1bd1904a9c3) service to localhost/127.0.0.1:34129 2024-11-18T02:31:04,738 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data1/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:04,738 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data2/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:04,739 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:31:04,747 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:31:04,751 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:31:04,752 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:31:04,752 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:31:04,752 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:31:04,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@290ac13e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:31:04,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c83d523{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:31:04,807 WARN [Thread-1335 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:31:04,810 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4bd71b9e375f57a with lease ID 0x5fe9e8a464d50c12: from storage DS-b1452ce0-ff81-474f-ab85-6834de112d24 node DatanodeRegistration(127.0.0.1:41849, datanodeUuid=4b122838-ac19-41cc-872c-933de20bbf9f, infoPort=35869, infoSecurePort=0, ipcPort=33835, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T02:31:04,810 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4bd71b9e375f57a with lease ID 0x5fe9e8a464d50c12: from storage DS-b54e9ad4-024a-43f7-8b32-3691f5426c00 node DatanodeRegistration(127.0.0.1:41849, datanodeUuid=4b122838-ac19-41cc-872c-933de20bbf9f, infoPort=35869, infoSecurePort=0, ipcPort=33835, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:31:04,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d5ab907{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/java.io.tmpdir/jetty-localhost-35603-hadoop-hdfs-3_4_1-tests_jar-_-any-15032387027496120046/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:04,875 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34ffd22{HTTP/1.1, (http/1.1)}{localhost:35603} 2024-11-18T02:31:04,875 INFO [Time-limited test {}] server.Server(415): Started @163331ms 2024-11-18T02:31:04,876 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T02:31:04,963 WARN [Thread-1366 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:31:04,965 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x820c1c5f2eb59358 with lease ID 0x5fe9e8a464d50c13: from storage DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b node DatanodeRegistration(127.0.0.1:35011, datanodeUuid=9b45d3d3-3d89-43d5-9941-e1bd1904a9c3, infoPort=37877, infoSecurePort=0, ipcPort=33147, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:31:04,966 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x820c1c5f2eb59358 with lease ID 0x5fe9e8a464d50c13: from storage DS-ef7a441c-ba73-4da5-8b70-4d4390d9c317 node DatanodeRegistration(127.0.0.1:35011, datanodeUuid=9b45d3d3-3d89-43d5-9941-e1bd1904a9c3, infoPort=37877, infoSecurePort=0, ipcPort=33147, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T02:31:05,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:05,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:05,894 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-18T02:31:05,897 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-18T02:31:05,898 ERROR [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c-prefix:c4730a2bacf8,33923,1731897051562 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:31:05,898 WARN [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c-prefix:c4730a2bacf8,33923,1731897051562 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:05,898 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C33923%2C1731897051562:(num 1731897051947) roll requested 2024-11-18T02:31:05,898 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33923%2C1731897051562.1731897065898 2024-11-18T02:31:05,904 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 newFile=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 2024-11-18T02:31:05,904 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:05,904 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:05,904 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:05,904 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:05,904 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:05,905 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 2024-11-18T02:31:05,905 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:31:05,905 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:05,905 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 2024-11-18T02:31:05,905 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37877:37877),(127.0.0.1/127.0.0.1:35869:35869)] 2024-11-18T02:31:05,905 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 is not closed yet, will try archiving it next time 2024-11-18T02:31:05,906 WARN [IPC Server handler 4 on default port 34129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-18T02:31:05,906 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 after 1ms 2024-11-18T02:31:06,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:06,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:06,810 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T02:31:07,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:07,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:07,909 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-18T02:31:08,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:08,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:09,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:09,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:09,907 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 after 4002ms 2024-11-18T02:31:09,912 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:09,912 WARN [DataStreamer for file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 block BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35011,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK], DatanodeInfoWithStorage[127.0.0.1:41849,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35011,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]) is bad. 
2024-11-18T02:31:09,912 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:58202 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58202 dst: /127.0.0.1:35011 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:09,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:39892 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41849:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39892 dst: /127.0.0.1:41849 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:09,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d5ab907{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:09,915 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34ffd22{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:31:09,915 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:31:09,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c83d523{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:31:09,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@290ac13e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,STOPPED} 2024-11-18T02:31:09,916 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:31:09,916 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:31:09,916 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:31:09,916 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-568169947-172.17.0.2-1731897050829 (Datanode Uuid 9b45d3d3-3d89-43d5-9941-e1bd1904a9c3) service to localhost/127.0.0.1:34129 2024-11-18T02:31:09,917 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data1/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:09,918 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data2/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:09,918 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:31:09,925 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:31:09,928 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:31:09,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:31:09,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:31:09,929 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:31:09,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cea2bec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:31:09,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3affdf32{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:31:10,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a1054f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/java.io.tmpdir/jetty-localhost-43501-hadoop-hdfs-3_4_1-tests_jar-_-any-12758804804924995949/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:10,044 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@468f57bd{HTTP/1.1, 
(http/1.1)}{localhost:43501} 2024-11-18T02:31:10,044 INFO [Time-limited test {}] server.Server(415): Started @168500ms 2024-11-18T02:31:10,045 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:31:10,065 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:10,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1237795701_22 at /127.0.0.1:39908 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41849:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39908 dst: /127.0.0.1:41849 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:31:10,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f9084a1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:10,069 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22dbe5ea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:31:10,069 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:31:10,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55b48cd0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:31:10,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61726e31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,STOPPED} 2024-11-18T02:31:10,070 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:31:10,070 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-568169947-172.17.0.2-1731897050829 (Datanode Uuid 4b122838-ac19-41cc-872c-933de20bbf9f) service to localhost/127.0.0.1:34129 2024-11-18T02:31:10,070 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T02:31:10,070 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:31:10,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data3/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:10,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data4/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:10,071 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:31:10,081 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:31:10,084 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:31:10,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:31:10,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:31:10,085 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:31:10,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@584bc61c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:31:10,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c3c718b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:31:10,135 WARN [Thread-1409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:31:10,138 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ba32e1525805f37 with lease ID 0x5fe9e8a464d50c14: from storage DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b node DatanodeRegistration(127.0.0.1:34165, datanodeUuid=9b45d3d3-3d89-43d5-9941-e1bd1904a9c3, infoPort=36331, infoSecurePort=0, ipcPort=44567, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T02:31:10,138 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ba32e1525805f37 with lease ID 0x5fe9e8a464d50c14: from storage DS-ef7a441c-ba73-4da5-8b70-4d4390d9c317 node DatanodeRegistration(127.0.0.1:34165, datanodeUuid=9b45d3d3-3d89-43d5-9941-e1bd1904a9c3, infoPort=36331, infoSecurePort=0, ipcPort=44567, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:31:10,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10b7c71{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/java.io.tmpdir/jetty-localhost-37659-hadoop-hdfs-3_4_1-tests_jar-_-any-2468586612166658568/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:10,200 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b5b0e88{HTTP/1.1, (http/1.1)}{localhost:37659} 2024-11-18T02:31:10,200 INFO [Time-limited test {}] server.Server(415): Started @168656ms 2024-11-18T02:31:10,201 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T02:31:10,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:10,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:10,288 WARN [Thread-1440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:31:10,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f8c916569f6551b with lease ID 0x5fe9e8a464d50c15: from storage DS-b1452ce0-ff81-474f-ab85-6834de112d24 node DatanodeRegistration(127.0.0.1:39677, datanodeUuid=4b122838-ac19-41cc-872c-933de20bbf9f, infoPort=35153, infoSecurePort=0, ipcPort=34781, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:31:10,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f8c916569f6551b with lease ID 0x5fe9e8a464d50c15: from storage DS-b54e9ad4-024a-43f7-8b32-3691f5426c00 node DatanodeRegistration(127.0.0.1:39677, datanodeUuid=4b122838-ac19-41cc-872c-933de20bbf9f, infoPort=35153, infoSecurePort=0, ipcPort=34781, storageInfo=lv=-57;cid=testClusterID;nsid=705101038;c=1731897050829), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:31:11,219 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-18T02:31:11,221 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-18T02:31:11,222 ERROR [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c-prefix:c4730a2bacf8,33923,1731897051562 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41849,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:31:11,222 WARN [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c-prefix:c4730a2bacf8,33923,1731897051562 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41849,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:11,222 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C33923%2C1731897051562:(num 1731897065898) roll requested 2024-11-18T02:31:11,223 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33923%2C1731897051562.1731897071222 2024-11-18T02:31:11,228 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 newFile=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897071222 2024-11-18T02:31:11,228 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:11,229 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:11,229 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:11,229 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:11,229 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:11,229 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897071222 2024-11-18T02:31:11,229 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41849,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:31:11,229 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41849,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:11,229 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 2024-11-18T02:31:11,230 WARN [IPC Server handler 0 on default port 34129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-18T02:31:11,230 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36331:36331),(127.0.0.1/127.0.0.1:35153:35153)] 2024-11-18T02:31:11,230 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 is not closed yet, will try archiving it next time 2024-11-18T02:31:11,230 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 after 1ms 2024-11-18T02:31:11,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:11,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:12,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:12,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:13,232 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:13,237 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897071222 newFile=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:13,238 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:13,238 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:13,238 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:13,238 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:13,238 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:13,238 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897071222 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:13,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741838_1019 (size=1264) 2024-11-18T02:31:13,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741838_1019 (size=1264) 2024-11-18T02:31:13,243 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 is not closed yet, will try archiving it next time 2024-11-18T02:31:13,244 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36331:36331),(127.0.0.1/127.0.0.1:35153:35153)] 2024-11-18T02:31:13,245 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 is not closed yet, will try archiving it next time 2024-11-18T02:31:13,245 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 2024-11-18T02:31:13,245 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 2024-11-18T02:31:13,245 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 after 0ms 2024-11-18T02:31:13,245 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 2024-11-18T02:31:13,255 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731897052844/Put/vlen=218/seqid=0] 2024-11-18T02:31:13,255 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731897062561/Put/vlen=1045/seqid=0] 2024-11-18T02:31:13,255 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897051947 2024-11-18T02:31:13,255 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 2024-11-18T02:31:13,255 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 2024-11-18T02:31:13,256 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 after 1ms 2024-11-18T02:31:13,256 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 2024-11-18T02:31:13,260 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731897065898/Put/vlen=1045/seqid=0] 2024-11-18T02:31:13,260 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731897067910/Put/vlen=1045/seqid=0] 2024-11-18T02:31:13,260 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 2024-11-18T02:31:13,260 DEBUG [Time-limited test {}] wal.TestLogRolling(403): 
recovering lease for hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897071222 2024-11-18T02:31:13,260 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897071222 2024-11-18T02:31:13,260 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897071222 after 0ms 2024-11-18T02:31:13,260 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897071222 2024-11-18T02:31:13,263 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731897071222/Put/vlen=1045/seqid=0] 2024-11-18T02:31:13,263 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:13,263 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:13,264 WARN [IPC Server handler 2 on default port 34129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-18T02:31:13,264 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 after 1ms 2024-11-18T02:31:13,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:13,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:14,138 WARN [ResponseProcessor for block BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:14,138 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1436565403_22 at /127.0.0.1:47360 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34165:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47360 dst: /127.0.0.1:34165 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34165 remote=/127.0.0.1:47360]. Total timeout mills is 60000, 59098 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:14,139 WARN [DataStreamer for file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 block BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34165,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK], DatanodeInfoWithStorage[127.0.0.1:39677,DS-b1452ce0-ff81-474f-ab85-6834de112d24,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34165,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]) is bad. 2024-11-18T02:31:14,138 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1436565403_22 at /127.0.0.1:52006 [Receiving block BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39677:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52006 dst: /127.0.0.1:39677 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:31:14,139 WARN [DataStreamer for file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 block BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:31:14,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741839_1022 (size=85) 2024-11-18T02:31:14,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741839_1022 (size=85) 2024-11-18T02:31:14,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:14,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:15,138 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-18T02:31:15,231 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897065898 after 4002ms 2024-11-18T02:31:15,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:15,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:16,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:16,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:17,265 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 after 4002ms 2024-11-18T02:31:17,265 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:17,269 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:17,269 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing cba96910749987edda9cc2704dcab941 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-18T02:31:17,270 ERROR [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c-prefix:c4730a2bacf8,33923,1731897051562 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:17,270 WARN [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c-prefix:c4730a2bacf8,33923,1731897051562 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:17,270 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C33923%2C1731897051562:(num 1731897073231) roll requested 2024-11-18T02:31:17,271 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33923%2C1731897051562.1731897077270 2024-11-18T02:31:17,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:17,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:17,276 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 newFile=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897077270 2024-11-18T02:31:17,276 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,276 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,277 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,277 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,277 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,277 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897077270 2024-11-18T02:31:17,277 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:17,278 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36331:36331),(127.0.0.1/127.0.0.1:35153:35153)] 2024-11-18T02:31:17,277 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-568169947-172.17.0.2-1731897050829:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:17,278 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 is not closed yet, will try archiving it next time 2024-11-18T02:31:17,278 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:17,278 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 after 0ms 2024-11-18T02:31:17,279 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 to hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/oldWALs/c4730a2bacf8%2C33923%2C1731897051562.1731897073231 2024-11-18T02:31:17,296 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941/.tmp/info/5c8cc30a781b43ea91280bba1c6addd5 is 1080, key is row1002/info:/1731897062561/Put/seqid=0 2024-11-18T02:31:17,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741841_1024 (size=9270) 2024-11-18T02:31:17,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741841_1024 (size=9270) 2024-11-18T02:31:17,301 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941/.tmp/info/5c8cc30a781b43ea91280bba1c6addd5 2024-11-18T02:31:17,307 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941/.tmp/info/5c8cc30a781b43ea91280bba1c6addd5 as hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941/info/5c8cc30a781b43ea91280bba1c6addd5 2024-11-18T02:31:17,312 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941/info/5c8cc30a781b43ea91280bba1c6addd5, entries=4, sequenceid=8, filesize=9.1 K 2024-11-18T02:31:17,313 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for cba96910749987edda9cc2704dcab941 in 44ms, sequenceid=8, compaction requested=false 2024-11-18T02:31:17,314 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for cba96910749987edda9cc2704dcab941: 2024-11-18T02:31:17,314 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-18T02:31:17,314 ERROR [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c-prefix:c4730a2bacf8,33923,1731897051562.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:17,314 WARN [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c-prefix:c4730a2bacf8,33923,1731897051562.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
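The entries above show the test thread flushing the memstore of cba96910749987edda9cc2704dcab941 to a new HFile and committing it just before the meta WAL append starts failing. For orientation only, a comparable flush can be requested from client code through the public Admin API; this is a minimal sketch, not the test's own code, and it assumes an hbase-site.xml on the classpath pointing at the cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region server(s) to flush the table's memstores to HFiles,
      // the operation the log reports as "Finished flush of dataSize ...".
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"));
    }
  }
}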
2024-11-18T02:31:17,314 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C33923%2C1731897051562.meta:.meta(num 1731897052329) roll requested 2024-11-18T02:31:17,315 INFO [regionserver/c4730a2bacf8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33923%2C1731897051562.meta.1731897077314.meta 2024-11-18T02:31:17,320 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,320 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,321 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,321 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,321 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897077314.meta 2024-11-18T02:31:17,322 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:17,322 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
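The meta WAL roll above is triggered internally by the LogRoller after appendAndSync fails. A roll can also be requested explicitly through the Admin API; the sketch below is illustrative only, and the server name string simply mirrors the "host,port,startcode" form seen in the WAL paths in this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // "host,port,startcode", e.g. the region server that owns the WALs above.
      ServerName server = ServerName.valueOf("c4730a2bacf8,33923,1731897051562");
      // Ask that region server to close its current WAL and open a new one.
      admin.rollWALWriter(server);
    }
  }
}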
2024-11-18T02:31:17,322 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta 2024-11-18T02:31:17,322 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36331:36331),(127.0.0.1/127.0.0.1:35153:35153)] 2024-11-18T02:31:17,322 DEBUG [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta is not closed yet, will try archiving it next time 2024-11-18T02:31:17,322 WARN [IPC Server handler 2 on default port 34129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-18T02:31:17,322 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta after 0ms 2024-11-18T02:31:17,338 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/info/e375675dde3a4de2960f54c5892fee2c is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941./info:regioninfo/1731897052848/Put/seqid=0 2024-11-18T02:31:17,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741843_1027 (size=7125) 2024-11-18T02:31:17,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741843_1027 (size=7125) 2024-11-18T02:31:17,344 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/info/e375675dde3a4de2960f54c5892fee2c 2024-11-18T02:31:17,364 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/ns/afdf46be735f42e4b1978f60d5de6f9c is 43, key is default/ns:d/1731897052369/Put/seqid=0 2024-11-18T02:31:17,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741844_1028 (size=5153) 2024-11-18T02:31:17,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741844_1028 (size=5153) 2024-11-18T02:31:17,369 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/ns/afdf46be735f42e4b1978f60d5de6f9c 2024-11-18T02:31:17,389 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/table/3250d140dc0147a29f958ec9bc747a91 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731897052858/Put/seqid=0 2024-11-18T02:31:17,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741845_1029 (size=5438) 2024-11-18T02:31:17,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741845_1029 (size=5438) 2024-11-18T02:31:17,394 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/table/3250d140dc0147a29f958ec9bc747a91 2024-11-18T02:31:17,399 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/info/e375675dde3a4de2960f54c5892fee2c as hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/info/e375675dde3a4de2960f54c5892fee2c 2024-11-18T02:31:17,404 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/info/e375675dde3a4de2960f54c5892fee2c, entries=10, sequenceid=11, filesize=7.0 K 2024-11-18T02:31:17,405 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/ns/afdf46be735f42e4b1978f60d5de6f9c as hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/ns/afdf46be735f42e4b1978f60d5de6f9c 2024-11-18T02:31:17,410 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/ns/afdf46be735f42e4b1978f60d5de6f9c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T02:31:17,411 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/.tmp/table/3250d140dc0147a29f958ec9bc747a91 as hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/table/3250d140dc0147a29f958ec9bc747a91 2024-11-18T02:31:17,416 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/table/3250d140dc0147a29f958ec9bc747a91, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T02:31:17,417 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 103ms, sequenceid=11, compaction requested=false 2024-11-18T02:31:17,417 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T02:31:17,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T02:31:17,423 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T02:31:17,423 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:31:17,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:31:17,423 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:31:17,423 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T02:31:17,424 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T02:31:17,424 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=795453095, stopped=false 2024-11-18T02:31:17,424 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c4730a2bacf8,40797,1731897051513 2024-11-18T02:31:17,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:31:17,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:17,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:31:17,425 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:31:17,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:17,426 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
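Every call stack in this shutdown sequence bottoms out in AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster. As a rough sketch of that lifecycle (not the actual AbstractTestLogRolling source), a JUnit 4 test using the same utility class typically looks like this:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Spins up embedded ZooKeeper, HDFS and HBase, like the cluster in this log.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // The call visible in the stack traces above: close connections, stop the
    // region server(s) and master, then stop DFS and ZooKeeper.
    testUtil.shutdownMiniCluster();
  }

  @Test
  public void clusterComesUpAndDown() {
    // A real test would create tables, write data and roll WALs here.
  }
}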
2024-11-18T02:31:17,426 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:31:17,426 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:31:17,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:31:17,426 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:31:17,426 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c4730a2bacf8,33923,1731897051562' ***** 2024-11-18T02:31:17,426 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T02:31:17,427 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T02:31:17,427 INFO [RS:0;c4730a2bacf8:33923 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T02:31:17,427 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T02:31:17,427 INFO [RS:0;c4730a2bacf8:33923 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T02:31:17,427 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(3091): Received CLOSE for cba96910749987edda9cc2704dcab941 2024-11-18T02:31:17,427 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(959): stopping server c4730a2bacf8,33923,1731897051562 2024-11-18T02:31:17,427 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:31:17,427 INFO [RS:0;c4730a2bacf8:33923 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c4730a2bacf8:33923. 
2024-11-18T02:31:17,428 DEBUG [RS:0;c4730a2bacf8:33923 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:31:17,428 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cba96910749987edda9cc2704dcab941, disabling compactions & flushes 2024-11-18T02:31:17,428 DEBUG [RS:0;c4730a2bacf8:33923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:31:17,428 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:31:17,428 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:31:17,428 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T02:31:17,428 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. after waiting 0 ms 2024-11-18T02:31:17,428 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T02:31:17,428 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:31:17,428 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T02:31:17,428 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T02:31:17,428 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T02:31:17,428 DEBUG [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1325): Online Regions={cba96910749987edda9cc2704dcab941=TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T02:31:17,428 DEBUG [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cba96910749987edda9cc2704dcab941 2024-11-18T02:31:17,428 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:31:17,428 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:31:17,428 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:31:17,428 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:31:17,429 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:31:17,433 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/default/TestLogRolling-testLogRollOnPipelineRestart/cba96910749987edda9cc2704dcab941/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-18T02:31:17,433 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 2024-11-18T02:31:17,433 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T02:31:17,433 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cba96910749987edda9cc2704dcab941: Waiting for close lock at 1731897077427Running coprocessor pre-close hooks at 1731897077427Disabling compacts and flushes for region at 1731897077427Disabling writes for close at 1731897077428 (+1 ms)Writing region close event to WAL at 1731897077428Running coprocessor post-close hooks at 1731897077433 (+5 ms)Closed at 1731897077433 2024-11-18T02:31:17,434 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731897052494.cba96910749987edda9cc2704dcab941. 
2024-11-18T02:31:17,434 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:31:17,434 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:31:17,434 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897077428Running coprocessor pre-close hooks at 1731897077428Disabling compacts and flushes for region at 1731897077428Disabling writes for close at 1731897077429 (+1 ms)Writing region close event to WAL at 1731897077430 (+1 ms)Running coprocessor post-close hooks at 1731897077434 (+4 ms)Closed at 1731897077434 2024-11-18T02:31:17,434 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T02:31:17,628 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(976): stopping server c4730a2bacf8,33923,1731897051562; all regions closed. 2024-11-18T02:31:17,629 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,629 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,629 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,629 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,629 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:17,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741842_1025 (size=825) 2024-11-18T02:31:17,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741842_1025 (size=825) 2024-11-18T02:31:17,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:31:17,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:31:17,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T02:31:17,813 INFO [regionserver/c4730a2bacf8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:31:17,815 INFO [regionserver/c4730a2bacf8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T02:31:17,815 INFO [regionserver/c4730a2bacf8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T02:31:18,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:18,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:19,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:19,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:20,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:20,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:21,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:21,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:21,290 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-18T02:31:21,323 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta after 4001ms 2024-11-18T02:31:21,324 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/WALs/c4730a2bacf8,33923,1731897051562/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta to hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/oldWALs/c4730a2bacf8%2C33923%2C1731897051562.meta.1731897052329.meta 2024-11-18T02:31:21,326 DEBUG [RS:0;c4730a2bacf8:33923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/oldWALs 2024-11-18T02:31:21,326 INFO [RS:0;c4730a2bacf8:33923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C33923%2C1731897051562.meta:.meta(num 1731897077314) 2024-11-18T02:31:21,327 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,327 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,327 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,327 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,327 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741840_1023 (size=1162) 2024-11-18T02:31:21,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741840_1023 (size=1162) 2024-11-18T02:31:21,333 DEBUG [RS:0;c4730a2bacf8:33923 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/oldWALs 2024-11-18T02:31:21,333 INFO [RS:0;c4730a2bacf8:33923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C33923%2C1731897051562:(num 1731897077270) 2024-11-18T02:31:21,333 DEBUG [RS:0;c4730a2bacf8:33923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:31:21,333 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:31:21,333 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:31:21,333 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.ChoreService(370): Chore service for: regionserver/c4730a2bacf8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T02:31:21,334 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:31:21,334 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
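The lease on the old meta WAL is recovered at attempt=1 after roughly 4 seconds and the file is then archived to oldWALs; the repeated warnings before that come from RecoverLeaseFSUtils polling DistributedFileSystem.isFileClosed through reflection against a DFSClient that has already been closed ("Filesystem closed"). A rough approximation of that recover-then-poll loop, assuming an open filesystem handle and a hypothetical WAL path, is sketched below.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI taken from the log; the WAL path below is a placeholder.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34129"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path wal = new Path("/user/jenkins/test-data/WALs/example-wal"); // hypothetical
      // recoverLease returns true if the file is already closed or recovery
      // finished synchronously; otherwise recovery continues on the NameNode.
      boolean closed = dfs.recoverLease(wal);
      while (!closed) {
        Thread.sleep(1000); // back off between attempts, as RecoverLeaseFSUtils does
        // Throws IOException("Filesystem closed") if the DFSClient was shut down,
        // which is exactly the failure mode in the warnings above.
        closed = dfs.isFileClosed(wal);
      }
    }
  }
}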
2024-11-18T02:31:21,334 INFO [RS:0;c4730a2bacf8:33923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33923 2024-11-18T02:31:21,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:31:21,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c4730a2bacf8,33923,1731897051562 2024-11-18T02:31:21,336 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:31:21,338 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c4730a2bacf8,33923,1731897051562] 2024-11-18T02:31:21,341 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c4730a2bacf8,33923,1731897051562 already deleted, retry=false 2024-11-18T02:31:21,341 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c4730a2bacf8,33923,1731897051562 expired; onlineServers=0 2024-11-18T02:31:21,341 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c4730a2bacf8,40797,1731897051513' ***** 2024-11-18T02:31:21,341 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T02:31:21,341 INFO [M:0;c4730a2bacf8:40797 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:31:21,341 INFO [M:0;c4730a2bacf8:40797 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:31:21,341 DEBUG [M:0;c4730a2bacf8:40797 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T02:31:21,342 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T02:31:21,342 DEBUG [M:0;c4730a2bacf8:40797 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T02:31:21,342 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897051727 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897051727,5,FailOnTimeoutGroup] 2024-11-18T02:31:21,342 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897051727 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897051727,5,FailOnTimeoutGroup] 2024-11-18T02:31:21,342 INFO [M:0;c4730a2bacf8:40797 {}] hbase.ChoreService(370): Chore service for: master/c4730a2bacf8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T02:31:21,342 INFO [M:0;c4730a2bacf8:40797 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:31:21,342 DEBUG [M:0;c4730a2bacf8:40797 {}] master.HMaster(1795): Stopping service threads 2024-11-18T02:31:21,342 INFO [M:0;c4730a2bacf8:40797 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T02:31:21,342 INFO [M:0;c4730a2bacf8:40797 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:31:21,342 INFO [M:0;c4730a2bacf8:40797 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T02:31:21,342 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T02:31:21,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T02:31:21,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:21,343 DEBUG [M:0;c4730a2bacf8:40797 {}] zookeeper.ZKUtil(347): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T02:31:21,343 WARN [M:0;c4730a2bacf8:40797 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T02:31:21,344 INFO [M:0;c4730a2bacf8:40797 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/.lastflushedseqids 2024-11-18T02:31:21,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741846_1030 (size=130) 2024-11-18T02:31:21,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741846_1030 (size=130) 2024-11-18T02:31:21,350 INFO [M:0;c4730a2bacf8:40797 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T02:31:21,350 INFO [M:0;c4730a2bacf8:40797 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T02:31:21,350 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:31:21,350 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:31:21,350 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:31:21,350 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:31:21,350 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:31:21,350 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-18T02:31:21,351 ERROR [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData-prefix:c4730a2bacf8,40797,1731897051513 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:21,351 WARN [FSHLog-0-hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData-prefix:c4730a2bacf8,40797,1731897051513 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:31:21,351 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c4730a2bacf8%2C40797%2C1731897051513:(num 1731897051649) roll requested 2024-11-18T02:31:21,351 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C40797%2C1731897051513.1731897081351 2024-11-18T02:31:21,355 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,356 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,356 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,356 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,356 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,356 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897081351 2024-11-18T02:31:21,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T02:31:21,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36451,DS-d6ad7ad4-b13e-4bbb-ace1-31b18ca5fd0b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T02:31:21,357 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 2024-11-18T02:31:21,357 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36331:36331),(127.0.0.1/127.0.0.1:35153:35153)] 2024-11-18T02:31:21,357 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 is not closed yet, will try archiving it next time 2024-11-18T02:31:21,357 WARN [IPC Server handler 0 on default port 34129 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-18T02:31:21,357 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 after 0ms 2024-11-18T02:31:21,372 DEBUG [M:0;c4730a2bacf8:40797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d7267e87b4154b3fbae226d7684c0150 is 82, key is hbase:meta,,1/info:regioninfo/1731897052353/Put/seqid=0 2024-11-18T02:31:21,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741848_1033 (size=5672) 2024-11-18T02:31:21,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741848_1033 (size=5672) 2024-11-18T02:31:21,377 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d7267e87b4154b3fbae226d7684c0150 2024-11-18T02:31:21,396 DEBUG [M:0;c4730a2bacf8:40797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5adccc02171044b982defcd58073d9b5 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731897052862/Put/seqid=0 2024-11-18T02:31:21,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741849_1034 (size=6117) 2024-11-18T02:31:21,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741849_1034 (size=6117) 2024-11-18T02:31:21,401 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5adccc02171044b982defcd58073d9b5 2024-11-18T02:31:21,420 DEBUG [M:0;c4730a2bacf8:40797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a1b80d807e84752925f0bbb2bfa7bae is 69, key is c4730a2bacf8,33923,1731897051562/rs:state/1731897051798/Put/seqid=0 2024-11-18T02:31:21,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741850_1035 (size=5156) 2024-11-18T02:31:21,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741850_1035 (size=5156) 2024-11-18T02:31:21,425 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a1b80d807e84752925f0bbb2bfa7bae 2024-11-18T02:31:21,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:31:21,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33923-0x10128e9b3fb0001, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:31:21,438 INFO [RS:0;c4730a2bacf8:33923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:31:21,438 INFO [RS:0;c4730a2bacf8:33923 {}] regionserver.HRegionServer(1031): Exiting; stopping=c4730a2bacf8,33923,1731897051562; zookeeper connection closed. 
2024-11-18T02:31:21,438 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3cf373c3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3cf373c3 2024-11-18T02:31:21,438 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T02:31:21,444 DEBUG [M:0;c4730a2bacf8:40797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9f92c3a033a49319c8ea721968dad0e is 52, key is load_balancer_on/state:d/1731897052490/Put/seqid=0 2024-11-18T02:31:21,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741851_1036 (size=5056) 2024-11-18T02:31:21,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741851_1036 (size=5056) 2024-11-18T02:31:21,449 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9f92c3a033a49319c8ea721968dad0e 2024-11-18T02:31:21,454 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d7267e87b4154b3fbae226d7684c0150 as hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d7267e87b4154b3fbae226d7684c0150 2024-11-18T02:31:21,458 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d7267e87b4154b3fbae226d7684c0150, entries=8, sequenceid=56, filesize=5.5 K 2024-11-18T02:31:21,459 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5adccc02171044b982defcd58073d9b5 as hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5adccc02171044b982defcd58073d9b5 2024-11-18T02:31:21,464 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5adccc02171044b982defcd58073d9b5, entries=6, sequenceid=56, filesize=6.0 K 2024-11-18T02:31:21,464 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6a1b80d807e84752925f0bbb2bfa7bae as hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6a1b80d807e84752925f0bbb2bfa7bae 
2024-11-18T02:31:21,469 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6a1b80d807e84752925f0bbb2bfa7bae, entries=1, sequenceid=56, filesize=5.0 K 2024-11-18T02:31:21,469 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9f92c3a033a49319c8ea721968dad0e as hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d9f92c3a033a49319c8ea721968dad0e 2024-11-18T02:31:21,474 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d9f92c3a033a49319c8ea721968dad0e, entries=1, sequenceid=56, filesize=4.9 K 2024-11-18T02:31:21,475 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=56, compaction requested=false 2024-11-18T02:31:21,476 INFO [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:31:21,476 DEBUG [M:0;c4730a2bacf8:40797 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897081350Disabling compacts and flushes for region at 1731897081350Disabling writes for close at 1731897081350Obtaining lock to block concurrent updates at 1731897081350Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731897081350Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731897081351 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731897081357 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731897081357Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731897081372 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731897081372Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731897081382 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731897081395 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731897081395Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731897081406 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731897081419 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731897081419Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731897081429 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731897081443 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731897081443Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a7e7fef: reopening flushed file at 1731897081453 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d76c363: reopening flushed file at 1731897081458 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a2981c1: reopening flushed file at 1731897081464 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71a0eacc: reopening flushed file at 1731897081469 (+5 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=56, compaction requested=false at 1731897081475 (+6 ms)Writing region close event to WAL at 1731897081476 (+1 ms)Closed at 1731897081476 2024-11-18T02:31:21,477 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,477 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,477 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,477 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,477 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:31:21,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39677 is added to blk_1073741847_1031 (size=757) 2024-11-18T02:31:21,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34165 is added to blk_1073741847_1031 (size=757) 2024-11-18T02:31:21,497 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T02:31:22,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:22,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:22,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,957 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T02:31:22,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:22,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:23,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:23,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:24,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:24,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:24,290 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-18T02:31:25,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:25,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:25,358 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 after 4001ms 2024-11-18T02:31:25,358 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/WALs/c4730a2bacf8,40797,1731897051513/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 to hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/oldWALs/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 2024-11-18T02:31:25,362 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/MasterData/oldWALs/c4730a2bacf8%2C40797%2C1731897051513.1731897051649 to hdfs://localhost:34129/user/jenkins/test-data/8bc4f6cf-f0af-073a-3cdb-4f07f7406c9c/oldWALs/c4730a2bacf8%2C40797%2C1731897051513.1731897051649$masterlocalwal$ 2024-11-18T02:31:25,362 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T02:31:25,362 INFO [M:0;c4730a2bacf8:40797 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-18T02:31:25,362 INFO [M:0;c4730a2bacf8:40797 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40797 2024-11-18T02:31:25,362 INFO [M:0;c4730a2bacf8:40797 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:31:25,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:31:25,464 INFO [M:0;c4730a2bacf8:40797 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:31:25,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40797-0x10128e9b3fb0000, quorum=127.0.0.1:59711, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:31:25,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10b7c71{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:25,467 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b5b0e88{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:31:25,467 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:31:25,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c3c718b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:31:25,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@584bc61c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,STOPPED} 2024-11-18T02:31:25,468 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:31:25,468 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-568169947-172.17.0.2-1731897050829 (Datanode Uuid 4b122838-ac19-41cc-872c-933de20bbf9f) service to localhost/127.0.0.1:34129 2024-11-18T02:31:25,468 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:31:25,468 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:31:25,469 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data3/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:25,469 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data4/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:25,469 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:31:25,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a1054f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:25,471 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@468f57bd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:31:25,471 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:31:25,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3affdf32{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:31:25,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cea2bec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,STOPPED} 2024-11-18T02:31:25,473 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:31:25,473 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:31:25,473 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:31:25,473 WARN [BP-568169947-172.17.0.2-1731897050829 heartbeating to localhost/127.0.0.1:34129 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-568169947-172.17.0.2-1731897050829 (Datanode Uuid 9b45d3d3-3d89-43d5-9941-e1bd1904a9c3) service to localhost/127.0.0.1:34129 2024-11-18T02:31:25,473 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data1/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:25,473 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/cluster_bee74fa7-5d87-7ad9-c36f-feb9a03e3136/data/data2/current/BP-568169947-172.17.0.2-1731897050829 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:31:25,474 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:31:25,479 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45628471{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:31:25,479 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@371f8296{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:31:25,479 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:31:25,479 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@482b9b0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:31:25,479 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e179503{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir/,STOPPED} 2024-11-18T02:31:25,485 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T02:31:25,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T02:31:25,509 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 155) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34129 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:34129 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34129 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34129 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34129 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:34129 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:34129 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34129 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=102 (was 158), ProcessCount=11 (was 11), AvailableMemoryMB=2924 (was 3080) 2024-11-18T02:31:25,517 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=102, ProcessCount=11, AvailableMemoryMB=2924 2024-11-18T02:31:25,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T02:31:25,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.log.dir so I do NOT create it in target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333 2024-11-18T02:31:25,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6f66e6a-960b-383a-4c45-1b9b76c04784/hadoop.tmp.dir so I do NOT create it in target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333 2024-11-18T02:31:25,517 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6, deleteOnExit=true 2024-11-18T02:31:25,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/test.cache.data in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.log.dir in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T02:31:25,518 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:31:25,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/nfs.dump.dir in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/java.io.tmpdir in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T02:31:25,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T02:31:25,532 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:31:25,607 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:31:25,610 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:31:25,611 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:31:25,612 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:31:25,612 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:31:25,612 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:31:25,613 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ff27683{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:31:25,613 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b4ea813{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:31:25,727 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fb33a9d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/java.io.tmpdir/jetty-localhost-33787-hadoop-hdfs-3_4_1-tests_jar-_-any-12307379554363144965/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:31:25,728 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a560185{HTTP/1.1, (http/1.1)}{localhost:33787} 2024-11-18T02:31:25,728 INFO [Time-limited test {}] server.Server(415): Started @184184ms 2024-11-18T02:31:25,741 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:31:25,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:31:25,810 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:31:25,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:31:25,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:31:25,811 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:31:25,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30d9f702{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:31:25,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ff483aa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:31:25,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@423ae426{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/java.io.tmpdir/jetty-localhost-44677-hadoop-hdfs-3_4_1-tests_jar-_-any-9714126302765430551/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:25,929 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b8edabe{HTTP/1.1, (http/1.1)}{localhost:44677} 2024-11-18T02:31:25,929 INFO [Time-limited test {}] server.Server(415): Started @184385ms 2024-11-18T02:31:25,930 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:31:25,960 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:31:25,963 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:31:25,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:31:25,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:31:25,963 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:31:25,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bfa82bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:31:25,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4957c8be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:31:26,022 WARN [Thread-1634 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/data/data1/current/BP-1868350782-172.17.0.2-1731897085549/current, will proceed with Du for space computation calculation, 2024-11-18T02:31:26,022 WARN [Thread-1635 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/data/data2/current/BP-1868350782-172.17.0.2-1731897085549/current, will proceed with Du for space computation calculation, 2024-11-18T02:31:26,039 WARN [Thread-1613 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:31:26,042 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a74ea0b8e577f38 with lease ID 0x12f569af66c9e9e0: Processing first storage report for DS-528eb75c-14ed-4f18-88a3-29ac8dfc3c18 from datanode DatanodeRegistration(127.0.0.1:41447, datanodeUuid=a536b8c4-0094-4746-ad09-bc98bd36bf3b, infoPort=37667, infoSecurePort=0, ipcPort=34601, storageInfo=lv=-57;cid=testClusterID;nsid=58354486;c=1731897085549) 2024-11-18T02:31:26,042 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a74ea0b8e577f38 with lease ID 0x12f569af66c9e9e0: from storage DS-528eb75c-14ed-4f18-88a3-29ac8dfc3c18 node DatanodeRegistration(127.0.0.1:41447, datanodeUuid=a536b8c4-0094-4746-ad09-bc98bd36bf3b, infoPort=37667, infoSecurePort=0, ipcPort=34601, storageInfo=lv=-57;cid=testClusterID;nsid=58354486;c=1731897085549), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:31:26,042 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a74ea0b8e577f38 with lease ID 0x12f569af66c9e9e0: Processing first storage report for DS-cb2a3c94-ea9b-4a75-b690-8c57b39c0891 from datanode DatanodeRegistration(127.0.0.1:41447, datanodeUuid=a536b8c4-0094-4746-ad09-bc98bd36bf3b, infoPort=37667, infoSecurePort=0, ipcPort=34601, storageInfo=lv=-57;cid=testClusterID;nsid=58354486;c=1731897085549) 2024-11-18T02:31:26,042 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a74ea0b8e577f38 with lease ID 0x12f569af66c9e9e0: from storage DS-cb2a3c94-ea9b-4a75-b690-8c57b39c0891 node DatanodeRegistration(127.0.0.1:41447, datanodeUuid=a536b8c4-0094-4746-ad09-bc98bd36bf3b, infoPort=37667, infoSecurePort=0, ipcPort=34601, storageInfo=lv=-57;cid=testClusterID;nsid=58354486;c=1731897085549), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:31:26,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f8e2aa3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/java.io.tmpdir/jetty-localhost-33909-hadoop-hdfs-3_4_1-tests_jar-_-any-11066160609189313730/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:31:26,082 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@151a0943{HTTP/1.1, (http/1.1)}{localhost:33909} 2024-11-18T02:31:26,082 INFO [Time-limited test {}] server.Server(415): Started @184538ms 2024-11-18T02:31:26,083 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
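The entries above show HBaseTestingUtil bringing up a fresh two-datanode minicluster for testCompactionRecordDoesntBlockRolling, with the topology printed as StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. For orientation only, a minimal sketch of a harness requesting the same topology follows; it is not taken from TestLogRolling itself, and the builder method names simply mirror the option fields printed in the log, so treat them as assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // In-process HDFS + ZooKeeper + HBase cluster manager used by these tests.
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Same topology as the option printed in the log: 1 master, 1 region server,
    // 2 datanodes, 1 ZooKeeper server (builder names assumed from the field names above).
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // produces the DFS/ZK/master startup entries seen above
    try {
      // ... the test body would run against the minicluster here ...
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" teardown entries
    }
  }
}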
2024-11-18T02:31:26,184 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/data/data3/current/BP-1868350782-172.17.0.2-1731897085549/current, will proceed with Du for space computation calculation, 2024-11-18T02:31:26,184 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/data/data4/current/BP-1868350782-172.17.0.2-1731897085549/current, will proceed with Du for space computation calculation, 2024-11-18T02:31:26,207 WARN [Thread-1649 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:31:26,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c1b19769f8598f0 with lease ID 0x12f569af66c9e9e1: Processing first storage report for DS-6ed379e7-a896-4aa7-a1b4-7af21bb41309 from datanode DatanodeRegistration(127.0.0.1:38529, datanodeUuid=208ea11f-ed99-4229-9cb7-ae25e5cb1b5a, infoPort=41915, infoSecurePort=0, ipcPort=39569, storageInfo=lv=-57;cid=testClusterID;nsid=58354486;c=1731897085549) 2024-11-18T02:31:26,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c1b19769f8598f0 with lease ID 0x12f569af66c9e9e1: from storage DS-6ed379e7-a896-4aa7-a1b4-7af21bb41309 node DatanodeRegistration(127.0.0.1:38529, datanodeUuid=208ea11f-ed99-4229-9cb7-ae25e5cb1b5a, infoPort=41915, infoSecurePort=0, ipcPort=39569, storageInfo=lv=-57;cid=testClusterID;nsid=58354486;c=1731897085549), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T02:31:26,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c1b19769f8598f0 with lease ID 0x12f569af66c9e9e1: Processing first storage report for DS-0314de49-8b95-4721-9e8c-042538193171 from datanode DatanodeRegistration(127.0.0.1:38529, datanodeUuid=208ea11f-ed99-4229-9cb7-ae25e5cb1b5a, infoPort=41915, infoSecurePort=0, ipcPort=39569, storageInfo=lv=-57;cid=testClusterID;nsid=58354486;c=1731897085549) 2024-11-18T02:31:26,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c1b19769f8598f0 with lease ID 0x12f569af66c9e9e1: from storage DS-0314de49-8b95-4721-9e8c-042538193171 node DatanodeRegistration(127.0.0.1:38529, datanodeUuid=208ea11f-ed99-4229-9cb7-ae25e5cb1b5a, infoPort=41915, infoSecurePort=0, ipcPort=39569, storageInfo=lv=-57;cid=testClusterID;nsid=58354486;c=1731897085549), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:31:26,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:26,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:26,306 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333 2024-11-18T02:31:26,308 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/zookeeper_0, clientPort=58976, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T02:31:26,309 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58976 2024-11-18T02:31:26,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:31:26,310 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:31:26,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:31:26,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:31:26,320 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52 with version=8 2024-11-18T02:31:26,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase-staging 2024-11-18T02:31:26,322 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:31:26,322 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:31:26,322 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:31:26,322 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:31:26,322 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:31:26,322 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:31:26,322 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T02:31:26,322 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:31:26,323 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41317 2024-11-18T02:31:26,324 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41317 connecting to ZooKeeper ensemble=127.0.0.1:58976 2024-11-18T02:31:26,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413170x0, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:31:26,333 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41317-0x10128ea3bf30000 connected 2024-11-18T02:31:26,346 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:31:26,348 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:31:26,349 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:31:26,349 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52, hbase.cluster.distributed=false 2024-11-18T02:31:26,351 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:31:26,351 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41317 2024-11-18T02:31:26,351 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41317 2024-11-18T02:31:26,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41317 2024-11-18T02:31:26,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41317 2024-11-18T02:31:26,352 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41317 2024-11-18T02:31:26,367 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:31:26,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:31:26,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:31:26,367 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:31:26,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:31:26,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:31:26,367 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T02:31:26,368 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:31:26,368 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35037 2024-11-18T02:31:26,369 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35037 connecting to ZooKeeper ensemble=127.0.0.1:58976 2024-11-18T02:31:26,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:31:26,371 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:31:26,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350370x0, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:31:26,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:350370x0, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:31:26,377 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35037-0x10128ea3bf30001 connected 2024-11-18T02:31:26,377 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T02:31:26,378 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T02:31:26,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
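The RpcExecutor and RWQueueRpcExecutor entries above record how the test master (port 41317) and region server (port 35037) sized their call queues: three handlers on the default FIFO queue, a read/write split on the priority queue, a single meta-priority handler, and maxQueueLength=30 on every queue. For context, the sketch below sets the standard server-side keys that normally drive this sizing; it is illustrative only (the class name RpcQueueSizing and the chosen values are not from this run), and it assumes hbase-common is on the classpath so HBaseConfiguration resolves.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueSizing {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Total RPC handler threads per region server; the per-queue handler
        // counts logged above are carved out of this pool.
        conf.setInt("hbase.regionserver.handler.count", 30);

        // Ratio of call queues to handlers (more queues, less lock contention).
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f);

        // Share reserved for reads vs. writes, and for long scans within the
        // read share (0 disables dedicated scan queues, matching
        // scanQueues=0 scanHandlers=0 above).
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0.0f);

        System.out.println("handlers = "
            + conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }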
2024-11-18T02:31:26,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:31:26,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35037 2024-11-18T02:31:26,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35037 2024-11-18T02:31:26,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35037 2024-11-18T02:31:26,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35037 2024-11-18T02:31:26,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35037 2024-11-18T02:31:26,392 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c4730a2bacf8:41317 2024-11-18T02:31:26,392 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:26,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:31:26,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:31:26,394 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:26,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T02:31:26,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,396 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T02:31:26,397 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c4730a2bacf8,41317,1731897086322 from backup master directory 2024-11-18T02:31:26,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:26,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:31:26,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:31:26,398 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:31:26,398 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:26,402 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/hbase.id] with ID: de7c7101-b82f-4942-bdd7-06ced45ec831 2024-11-18T02:31:26,402 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/.tmp/hbase.id 2024-11-18T02:31:26,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:31:26,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:31:26,411 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/.tmp/hbase.id]:[hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/hbase.id] 2024-11-18T02:31:26,421 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:31:26,421 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T02:31:26,422 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
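At this point the active master has written the cluster ID (de7c7101-b82f-4942-bdd7-06ced45ec831) and found an empty set of table descriptors under the test rootdir. Anything that wants to talk to this mini cluster does so through the ZooKeeper ensemble started earlier on clientPort=58976. A minimal client sketch follows, assuming the standard hbase-client dependency; MiniClusterClient is an illustrative name, and the port is ephemeral and changes on every test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as printed by MiniZooKeeperCluster in this run.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "58976");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The log above shows no user tables have been created yet,
          // so this would print 0 at this point in the run.
          System.out.println("user tables: " + admin.listTableNames().length);
        }
      }
    }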
2024-11-18T02:31:26,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:31:26,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:31:26,433 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:31:26,434 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T02:31:26,434 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:31:26,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:31:26,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:31:26,442 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store 2024-11-18T02:31:26,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:31:26,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:31:26,448 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:31:26,449 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:31:26,449 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:31:26,449 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:31:26,449 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:31:26,449 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:31:26,449 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
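The descriptor dumped above for the master-local 'master:store' region is assembled internally by MasterRegion, but the attributes it lists for the 'info' family (three versions, in-memory, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks) correspond to setters on the public builder API. The sketch below shows that correspondence for an ordinary user table; the table and class names are illustrative, and hbase-client is assumed to be available.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreLikeDescriptor {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes logged for master:store.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_store_like"))
            .setColumnFamily(info)
            .build();

        System.out.println(table);
      }
    }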
2024-11-18T02:31:26,449 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897086449Disabling compacts and flushes for region at 1731897086449Disabling writes for close at 1731897086449Writing region close event to WAL at 1731897086449Closed at 1731897086449 2024-11-18T02:31:26,450 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/.initializing 2024-11-18T02:31:26,450 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/WALs/c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:26,452 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C41317%2C1731897086322, suffix=, logDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/WALs/c4730a2bacf8,41317,1731897086322, archiveDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/oldWALs, maxLogs=10 2024-11-18T02:31:26,452 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C41317%2C1731897086322.1731897086452 2024-11-18T02:31:26,456 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/WALs/c4730a2bacf8,41317,1731897086322/c4730a2bacf8%2C41317%2C1731897086322.1731897086452 2024-11-18T02:31:26,458 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41915:41915),(127.0.0.1/127.0.0.1:37667:37667)] 2024-11-18T02:31:26,459 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:31:26,459 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:31:26,459 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,459 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T02:31:26,464 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:31:26,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T02:31:26,466 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:31:26,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T02:31:26,467 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:31:26,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T02:31:26,468 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:31:26,469 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,470 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,470 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,471 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,471 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,472 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T02:31:26,473 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:31:26,475 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:31:26,475 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733977, jitterRate=-0.06670078635215759}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T02:31:26,476 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731897086459Initializing all the Stores at 1731897086460 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897086460Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897086462 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897086462Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897086462Cleaning up temporary data from old regions at 1731897086471 (+9 ms)Region opened successfully at 1731897086475 (+4 ms) 2024-11-18T02:31:26,476 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T02:31:26,480 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73e0e523, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:31:26,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T02:31:26,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T02:31:26,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T02:31:26,481 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T02:31:26,482 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T02:31:26,482 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T02:31:26,482 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T02:31:26,484 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T02:31:26,484 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T02:31:26,486 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T02:31:26,487 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T02:31:26,487 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T02:31:26,489 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T02:31:26,489 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T02:31:26,490 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T02:31:26,491 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T02:31:26,492 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T02:31:26,494 DEBUG 
[master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T02:31:26,496 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T02:31:26,497 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T02:31:26,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:31:26,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:31:26,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,499 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c4730a2bacf8,41317,1731897086322, sessionid=0x10128ea3bf30000, setting cluster-up flag (Was=false) 2024-11-18T02:31:26,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,507 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T02:31:26,508 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:26,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,517 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T02:31:26,518 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:26,519 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T02:31:26,521 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T02:31:26,521 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T02:31:26,521 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T02:31:26,521 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c4730a2bacf8,41317,1731897086322 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T02:31:26,523 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:31:26,523 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:31:26,523 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:31:26,523 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:31:26,523 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c4730a2bacf8:0, corePoolSize=10, maxPoolSize=10 2024-11-18T02:31:26,523 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,523 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:31:26,523 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T02:31:26,524 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731897116524 2024-11-18T02:31:26,524 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T02:31:26,524 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T02:31:26,524 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T02:31:26,524 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T02:31:26,524 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T02:31:26,524 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T02:31:26,524 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,525 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T02:31:26,525 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T02:31:26,525 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:31:26,525 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T02:31:26,525 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T02:31:26,525 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T02:31:26,525 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T02:31:26,525 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897086525,5,FailOnTimeoutGroup] 2024-11-18T02:31:26,525 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897086525,5,FailOnTimeoutGroup] 2024-11-18T02:31:26,525 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,526 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T02:31:26,526 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,526 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,526 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,526 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T02:31:26,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:31:26,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:31:26,532 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T02:31:26,533 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52 2024-11-18T02:31:26,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:31:26,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:31:26,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:31:26,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:31:26,542 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:31:26,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:31:26,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:31:26,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:31:26,543 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:31:26,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:31:26,545 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:31:26,545 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:31:26,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:31:26,546 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:31:26,546 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:26,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:31:26,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:31:26,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740 2024-11-18T02:31:26,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740 2024-11-18T02:31:26,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:31:26,549 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:31:26,549 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
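Each store above is opened with the same CompactionConfiguration: at least 3 and at most 10 files per minor compaction, a 1.2 selection ratio (5.0 off-peak), and a weekly major compaction period (604800000 ms) with 0.5 jitter. These look like the stock defaults of the test harness; the sketch below lists the configuration keys a deployment would normally tune to change them, with values copied from the log lines above. The key-to-field mapping is given as commonly documented, and CompactionKnobs is an illustrative name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // minFilesToCompact / maxFilesToCompact in the log lines above.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Selection ratio and its off-peak variant.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

        // Major compaction period (ms) and jitter, logged as
        // "major period 604800000, major jitter 0.500000".
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);

        System.out.println("ratio = "
            + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
      }
    }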
2024-11-18T02:31:26,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:31:26,552 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:31:26,553 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826565, jitterRate=0.05103202164173126}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:31:26,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731897086539Initializing all the Stores at 1731897086540 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897086540Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897086541 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897086541Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897086541Cleaning up temporary data from old regions at 1731897086549 (+8 ms)Region opened successfully at 1731897086553 (+4 ms) 2024-11-18T02:31:26,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:31:26,553 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:31:26,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:31:26,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:31:26,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:31:26,554 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:31:26,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897086553Disabling compacts and flushes for region at 1731897086553Disabling writes for close at 1731897086554 (+1 
ms)Writing region close event to WAL at 1731897086554Closed at 1731897086554 2024-11-18T02:31:26,556 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:31:26,556 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T02:31:26,556 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T02:31:26,557 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:31:26,559 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T02:31:26,582 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(746): ClusterId : de7c7101-b82f-4942-bdd7-06ced45ec831 2024-11-18T02:31:26,582 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T02:31:26,587 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T02:31:26,587 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T02:31:26,589 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T02:31:26,589 DEBUG [RS:0;c4730a2bacf8:35037 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4be6932b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:31:26,601 DEBUG [RS:0;c4730a2bacf8:35037 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c4730a2bacf8:35037 2024-11-18T02:31:26,601 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T02:31:26,601 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T02:31:26,601 DEBUG [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(832): About to register with Master. 
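The "Opened 1588230740" record above reports ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826565, jitterRate=0.05103202164173126}, while a later TableDescriptorChecker warning in this same run shows hbase.hregion.max.filesize=786432. Those numbers are consistent with desiredMaxFileSize being the configured maximum file size scaled by (1 + jitterRate); the arithmetic check below assumes that relationship rather than quoting HBase's implementation.

```java
public class SplitSizeJitterCheck {
  public static void main(String[] args) {
    // Values copied from the log; the scaling relationship itself is an assumption.
    long configuredMaxFileSize = 786432L;        // hbase.hregion.max.filesize (see the TableDescriptorChecker warning)
    double jitterRate = 0.05103202164173126;     // from ConstantSizeRegionSplitPolicy{...}
    long desired = (long) (configuredMaxFileSize * (1 + jitterRate));
    System.out.println(desired);                 // 826565, matching desiredMaxFileSize above
  }
}
```

The same check reproduces the 863183 value reported when the meta region is re-opened further down (jitterRate=0.09759461879730225).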
2024-11-18T02:31:26,601 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(2659): reportForDuty to master=c4730a2bacf8,41317,1731897086322 with port=35037, startcode=1731897086367 2024-11-18T02:31:26,601 DEBUG [RS:0;c4730a2bacf8:35037 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T02:31:26,603 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42585, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T02:31:26,604 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41317 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:26,604 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41317 {}] master.ServerManager(517): Registering regionserver=c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:26,605 DEBUG [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52 2024-11-18T02:31:26,605 DEBUG [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33253 2024-11-18T02:31:26,605 DEBUG [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T02:31:26,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:31:26,607 DEBUG [RS:0;c4730a2bacf8:35037 {}] zookeeper.ZKUtil(111): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:26,607 WARN [RS:0;c4730a2bacf8:35037 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:31:26,608 INFO [RS:0;c4730a2bacf8:35037 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:31:26,608 DEBUG [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:26,608 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c4730a2bacf8,35037,1731897086367] 2024-11-18T02:31:26,612 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T02:31:26,614 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T02:31:26,614 INFO [RS:0;c4730a2bacf8:35037 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:31:26,614 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
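Several records above show the master and regionserver ZKWatcher instances reacting to NodeChildrenChanged events on /hbase/rs as the new regionserver's ephemeral znode appears (quorum=127.0.0.1:58976). The sketch below is not HBase's ZKWatcher/RegionServerTracker code; it is a minimal raw-ZooKeeper illustration of watching the same znode's children, assuming the quorum address from the log is reachable.

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum address taken from the log lines above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58976", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // One-shot watch on /hbase/rs: fires the same NodeChildrenChanged event type
    // that shows up in the ZKWatcher records above.
    List<String> servers = zk.getChildren("/hbase/rs",
        event -> System.out.println("children changed: " + event.getPath()));
    System.out.println("live regionservers: " + servers);
    zk.close();
  }
}
```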
2024-11-18T02:31:26,614 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T02:31:26,615 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T02:31:26,615 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,615 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:31:26,616 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:31:26,616 DEBUG [RS:0;c4730a2bacf8:35037 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:31:26,616 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
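The block of ExecutorService records above is the regionserver pre-allocating one bounded thread pool per event type (RS_OPEN_REGION, RS_CLOSE_META, and so on) with the listed core/max pool sizes. The snippet below is only a plain java.util.concurrent analogue of one such pool, not HBase's internal executor.ExecutorService wrapper.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorPoolSketch {
  public static void main(String[] args) {
    // Mirrors "corePoolSize=1, maxPoolSize=1" from the RS_OPEN_REGION record above:
    // a single worker thread draining an unbounded queue of pending region-open tasks.
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    openRegionPool.submit(() -> System.out.println("open-region task"));
    openRegionPool.shutdown();
  }
}
```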
2024-11-18T02:31:26,616 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,616 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,616 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,616 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,616 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,35037,1731897086367-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:31:26,631 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T02:31:26,631 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,35037,1731897086367-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,631 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,631 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.Replication(171): c4730a2bacf8,35037,1731897086367 started 2024-11-18T02:31:26,645 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:26,645 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1482): Serving as c4730a2bacf8,35037,1731897086367, RpcServer on c4730a2bacf8/172.17.0.2:35037, sessionid=0x10128ea3bf30001 2024-11-18T02:31:26,646 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T02:31:26,646 DEBUG [RS:0;c4730a2bacf8:35037 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:26,646 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,35037,1731897086367' 2024-11-18T02:31:26,646 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T02:31:26,646 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T02:31:26,647 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T02:31:26,647 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T02:31:26,647 DEBUG [RS:0;c4730a2bacf8:35037 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:26,647 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,35037,1731897086367' 2024-11-18T02:31:26,647 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T02:31:26,647 DEBUG 
[RS:0;c4730a2bacf8:35037 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T02:31:26,647 DEBUG [RS:0;c4730a2bacf8:35037 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T02:31:26,647 INFO [RS:0;c4730a2bacf8:35037 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T02:31:26,647 INFO [RS:0;c4730a2bacf8:35037 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T02:31:26,709 WARN [c4730a2bacf8:41317 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T02:31:26,749 INFO [RS:0;c4730a2bacf8:35037 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C35037%2C1731897086367, suffix=, logDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367, archiveDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/oldWALs, maxLogs=32 2024-11-18T02:31:26,750 INFO [RS:0;c4730a2bacf8:35037 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C35037%2C1731897086367.1731897086749 2024-11-18T02:31:26,756 INFO [RS:0;c4730a2bacf8:35037 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897086749 2024-11-18T02:31:26,757 DEBUG [RS:0;c4730a2bacf8:35037 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:41915:41915)] 2024-11-18T02:31:26,959 DEBUG [c4730a2bacf8:41317 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T02:31:26,960 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:26,961 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,35037,1731897086367, state=OPENING 2024-11-18T02:31:26,963 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T02:31:26,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:31:26,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:31:26,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:31:26,967 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:31:26,967 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,35037,1731897086367}] 2024-11-18T02:31:27,120 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T02:31:27,122 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55429, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T02:31:27,125 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T02:31:27,126 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:31:27,127 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C35037%2C1731897086367.meta, suffix=.meta, logDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367, archiveDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/oldWALs, maxLogs=32 2024-11-18T02:31:27,128 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C35037%2C1731897086367.meta.1731897087128.meta 2024-11-18T02:31:27,132 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.meta.1731897087128.meta 2024-11-18T02:31:27,133 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37667:37667),(127.0.0.1/127.0.0.1:41915:41915)] 2024-11-18T02:31:27,134 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:31:27,134 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T02:31:27,134 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T02:31:27,134 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
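The AbstractFSWAL records above report the WAL configuration for both the default and the meta WAL providers: blocksize=256 MB, rollsize=128 MB, plus the logDir/archiveDir layout under the test data root. The 2:1 ratio is consistent with the roll size being the block size scaled by a multiplier of 0.5 (commonly exposed as hbase.regionserver.logroll.multiplier); that mapping is an assumption here, not something the log states.

```java
public class WalRollSizeCheck {
  public static void main(String[] args) {
    // blocksize and rollsize copied from the AbstractFSWAL(613) records above.
    long blocksize = 256L * 1024 * 1024;
    double assumedMultiplier = 0.5;                      // assumed roll multiplier
    long rollsize = (long) (blocksize * assumedMultiplier);
    System.out.println(rollsize == 128L * 1024 * 1024);  // true: consistent with the log
  }
}
```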
2024-11-18T02:31:27,135 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T02:31:27,135 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:31:27,135 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T02:31:27,135 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T02:31:27,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:31:27,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:31:27,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:27,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:31:27,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:31:27,138 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:31:27,138 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:27,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:31:27,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:31:27,139 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:31:27,139 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:27,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:31:27,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:31:27,140 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:31:27,140 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:27,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
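The HStore records above show the four hbase:meta stores (info, ns, rep_barrier, table) being created with encoding=ROW_INDEX_V1 and compression=NONE, matching the column-family descriptors echoed in the region-open journal (ROWCOL bloom filters, IN_MEMORY=true, 8 KB or 64 KB block size). A hypothetical client-side descriptor with the same attributes could be built roughly as follows; this is illustrative only and is not how the meta table itself is defined.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
  public static void main(String[] args) {
    // Attribute values mirror the {NAME => 'info', ...} descriptor in the open journal above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    System.out.println(info);
  }
}
```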
2024-11-18T02:31:27,141 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:31:27,141 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740 2024-11-18T02:31:27,142 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740 2024-11-18T02:31:27,143 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:31:27,143 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:31:27,144 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T02:31:27,145 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:31:27,146 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863183, jitterRate=0.09759461879730225}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:31:27,146 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T02:31:27,146 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731897087135Writing region info on filesystem at 1731897087135Initializing all the Stores at 1731897087136 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897087136Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897087136Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897087136Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897087136Cleaning up temporary data from old regions at 1731897087143 (+7 ms)Running coprocessor post-open hooks at 1731897087146 (+3 ms)Region opened successfully at 1731897087146 2024-11-18T02:31:27,147 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731897087120 2024-11-18T02:31:27,150 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T02:31:27,150 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T02:31:27,151 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:27,151 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,35037,1731897086367, state=OPEN 2024-11-18T02:31:27,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:31:27,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:31:27,157 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:27,157 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:31:27,157 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:31:27,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T02:31:27,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,35037,1731897086367 in 190 msec 2024-11-18T02:31:27,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T02:31:27,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-18T02:31:27,163 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:31:27,163 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T02:31:27,164 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:31:27,164 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,35037,1731897086367, seqNum=-1] 2024-11-18T02:31:27,165 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:31:27,166 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40369, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:31:27,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 650 msec 2024-11-18T02:31:27,171 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731897087171, completionTime=-1 2024-11-18T02:31:27,171 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T02:31:27,171 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T02:31:27,173 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T02:31:27,173 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731897147173 2024-11-18T02:31:27,173 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731897207173 2024-11-18T02:31:27,173 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-18T02:31:27,173 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,41317,1731897086322-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:27,173 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,41317,1731897086322-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:27,173 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,41317,1731897086322-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:27,174 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c4730a2bacf8:41317, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T02:31:27,174 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:27,174 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T02:31:27,175 DEBUG [master/c4730a2bacf8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T02:31:27,177 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.779sec 2024-11-18T02:31:27,177 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T02:31:27,177 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T02:31:27,177 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T02:31:27,177 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T02:31:27,177 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T02:31:27,177 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,41317,1731897086322-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:31:27,177 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,41317,1731897086322-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T02:31:27,179 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T02:31:27,179 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T02:31:27,179 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,41317,1731897086322-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T02:31:27,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67d9f3fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:31:27,182 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c4730a2bacf8,41317,-1 for getting cluster id 2024-11-18T02:31:27,183 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T02:31:27,184 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'de7c7101-b82f-4942-bdd7-06ced45ec831' 2024-11-18T02:31:27,184 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T02:31:27,185 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "de7c7101-b82f-4942-bdd7-06ced45ec831" 2024-11-18T02:31:27,185 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8b86581, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:31:27,185 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c4730a2bacf8,41317,-1] 2024-11-18T02:31:27,185 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T02:31:27,185 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:31:27,186 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39616, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T02:31:27,187 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6091818, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:31:27,187 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:31:27,188 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,35037,1731897086367, seqNum=-1] 2024-11-18T02:31:27,188 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:31:27,189 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49744, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:31:27,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:27,191 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:31:27,194 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T02:31:27,194 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T02:31:27,195 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is c4730a2bacf8,41317,1731897086322 2024-11-18T02:31:27,195 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@10e8dd7c 2024-11-18T02:31:27,195 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T02:31:27,196 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39626, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T02:31:27,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T02:31:27,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T02:31:27,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:31:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T02:31:27,199 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T02:31:27,199 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:27,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-18T02:31:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:31:27,200 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T02:31:27,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741835_1011 (size=405) 2024-11-18T02:31:27,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741835_1011 (size=405) 2024-11-18T02:31:27,209 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6ca9efd5eae150fd1b82f928e7e92eb7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52 2024-11-18T02:31:27,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741836_1012 (size=88) 2024-11-18T02:31:27,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741836_1012 (size=88) 2024-11-18T02:31:27,215 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:31:27,215 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 6ca9efd5eae150fd1b82f928e7e92eb7, disabling compactions & flushes 2024-11-18T02:31:27,216 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:31:27,216 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:31:27,216 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. after waiting 0 ms 2024-11-18T02:31:27,216 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 
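The HMaster$4(2454) record above shows the test client asking the master to create 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family (VERSIONS=1, BLOOMFILTER=ROW, no block encoding, 64 KB blocks), which the master turns into CreateTableProcedure pid=4. A client-side equivalent of that request, sketched against the public Admin API and assuming a reachable cluster configuration, would look roughly like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml pointing at the cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
      // Family attributes copied from the create request logged above.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .setDataBlockEncoding(DataBlockEncoding.NONE)
          .setBlocksize(65536)
          .build());
      admin.createTable(table.build());
    }
  }
}
```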
2024-11-18T02:31:27,216 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:31:27,216 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6ca9efd5eae150fd1b82f928e7e92eb7: Waiting for close lock at 1731897087215Disabling compacts and flushes for region at 1731897087215Disabling writes for close at 1731897087216 (+1 ms)Writing region close event to WAL at 1731897087216Closed at 1731897087216 2024-11-18T02:31:27,217 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T02:31:27,218 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731897087217"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731897087217"}]},"ts":"1731897087217"} 2024-11-18T02:31:27,220 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-18T02:31:27,221 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T02:31:27,221 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731897087221"}]},"ts":"1731897087221"} 2024-11-18T02:31:27,223 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-18T02:31:27,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6ca9efd5eae150fd1b82f928e7e92eb7, ASSIGN}] 2024-11-18T02:31:27,225 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6ca9efd5eae150fd1b82f928e7e92eb7, ASSIGN 2024-11-18T02:31:27,226 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6ca9efd5eae150fd1b82f928e7e92eb7, ASSIGN; state=OFFLINE, location=c4730a2bacf8,35037,1731897086367; forceNewPlan=false, retain=false 2024-11-18T02:31:27,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:27,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:27,376 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6ca9efd5eae150fd1b82f928e7e92eb7, regionState=OPENING, regionLocation=c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:27,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6ca9efd5eae150fd1b82f928e7e92eb7, ASSIGN because future has completed 2024-11-18T02:31:27,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ca9efd5eae150fd1b82f928e7e92eb7, server=c4730a2bacf8,35037,1731897086367}] 2024-11-18T02:31:27,536 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 
2024-11-18T02:31:27,536 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6ca9efd5eae150fd1b82f928e7e92eb7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:31:27,536 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,536 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:31:27,537 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,537 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,538 INFO [StoreOpener-6ca9efd5eae150fd1b82f928e7e92eb7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,539 INFO [StoreOpener-6ca9efd5eae150fd1b82f928e7e92eb7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ca9efd5eae150fd1b82f928e7e92eb7 columnFamilyName info 2024-11-18T02:31:27,539 DEBUG [StoreOpener-6ca9efd5eae150fd1b82f928e7e92eb7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:31:27,540 INFO [StoreOpener-6ca9efd5eae150fd1b82f928e7e92eb7-1 {}] regionserver.HStore(327): Store=6ca9efd5eae150fd1b82f928e7e92eb7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:31:27,540 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,540 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,541 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,541 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,541 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,543 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,544 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:31:27,545 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6ca9efd5eae150fd1b82f928e7e92eb7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851229, jitterRate=0.08239428699016571}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T02:31:27,545 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:31:27,546 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6ca9efd5eae150fd1b82f928e7e92eb7: Running coprocessor pre-open hook at 1731897087537Writing region info on filesystem at 1731897087537Initializing all the Stores at 1731897087537Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897087537Cleaning up temporary data from old regions at 1731897087541 (+4 ms)Running coprocessor post-open hooks at 1731897087545 (+4 ms)Region opened successfully at 1731897087546 (+1 ms) 2024-11-18T02:31:27,547 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7., pid=6, masterSystemTime=1731897087532 2024-11-18T02:31:27,550 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:31:27,550 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:31:27,551 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6ca9efd5eae150fd1b82f928e7e92eb7, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,35037,1731897086367 2024-11-18T02:31:27,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ca9efd5eae150fd1b82f928e7e92eb7, server=c4730a2bacf8,35037,1731897086367 because future has completed 2024-11-18T02:31:27,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T02:31:27,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6ca9efd5eae150fd1b82f928e7e92eb7, server=c4730a2bacf8,35037,1731897086367 in 175 msec 2024-11-18T02:31:27,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T02:31:27,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=6ca9efd5eae150fd1b82f928e7e92eb7, ASSIGN in 334 msec 2024-11-18T02:31:27,561 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T02:31:27,561 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731897087561"}]},"ts":"1731897087561"} 2024-11-18T02:31:27,563 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-18T02:31:27,564 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T02:31:27,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 368 msec 2024-11-18T02:31:27,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:31:27,769 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T02:31:27,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:31:27,770 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T02:31:27,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T02:31:27,770 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T02:31:28,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:28,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:29,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:29,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:30,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:30,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:31,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:31,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:32,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:32,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:32,636 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T02:31:32,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:31:32,664 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T02:31:32,664 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-18T02:31:33,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:33,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:34,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:34,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:35,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:35,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:36,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:36,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:31:37,264 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T02:31:37,264 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-18T02:31:37,267 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T02:31:37,267 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:31:37,270 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7., hostname=c4730a2bacf8,35037,1731897086367, seqNum=2] 2024-11-18T02:31:37,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T02:31:37,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T02:31:37,283 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T02:31:37,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-18T02:31:37,284 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T02:31:37,285 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T02:31:37,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:37,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:37,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35037 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-18T02:31:37,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:31:37,445 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 6ca9efd5eae150fd1b82f928e7e92eb7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T02:31:37,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/10cdee4c9a684c4f957b770d57a96efc is 1080, key is row0001/info:/1731897097271/Put/seqid=0 2024-11-18T02:31:37,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741837_1013 (size=6033) 2024-11-18T02:31:37,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741837_1013 (size=6033) 2024-11-18T02:31:37,466 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/10cdee4c9a684c4f957b770d57a96efc 2024-11-18T02:31:37,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/10cdee4c9a684c4f957b770d57a96efc as 
hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/10cdee4c9a684c4f957b770d57a96efc 2024-11-18T02:31:37,478 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/10cdee4c9a684c4f957b770d57a96efc, entries=1, sequenceid=5, filesize=5.9 K 2024-11-18T02:31:37,479 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6ca9efd5eae150fd1b82f928e7e92eb7 in 34ms, sequenceid=5, compaction requested=false 2024-11-18T02:31:37,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 6ca9efd5eae150fd1b82f928e7e92eb7: 2024-11-18T02:31:37,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:31:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-18T02:31:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-18T02:31:37,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-18T02:31:37,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-18T02:31:37,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 210 msec 2024-11-18T02:31:38,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:38,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:39,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:39,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:40,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:40,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:41,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:41,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:42,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:42,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:43,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:43,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:44,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:44,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:45,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:45,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:46,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:46,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:47,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:47,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T02:31:47,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-18T02:31:47,354 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-18T02:31:47,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T02:31:47,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T02:31:47,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-18T02:31:47,360 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-18T02:31:47,361 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-18T02:31:47,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-18T02:31:47,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35037 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-18T02:31:47,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.
2024-11-18T02:31:47,514 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 6ca9efd5eae150fd1b82f928e7e92eb7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-18T02:31:47,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/71fa69d9ad2d46a48f98fc0b19f06590 is 1080, key is row0002/info:/1731897107355/Put/seqid=0
2024-11-18T02:31:47,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741838_1014 (size=6033)
2024-11-18T02:31:47,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741838_1014 (size=6033)
2024-11-18T02:31:47,527 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/71fa69d9ad2d46a48f98fc0b19f06590
2024-11-18T02:31:47,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/71fa69d9ad2d46a48f98fc0b19f06590 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/71fa69d9ad2d46a48f98fc0b19f06590
2024-11-18T02:31:47,538 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/71fa69d9ad2d46a48f98fc0b19f06590, entries=1, sequenceid=9, filesize=5.9 K
2024-11-18T02:31:47,539 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6ca9efd5eae150fd1b82f928e7e92eb7 in 25ms, sequenceid=9, compaction requested=false
2024-11-18T02:31:47,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 6ca9efd5eae150fd1b82f928e7e92eb7:
2024-11-18T02:31:47,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.
2024-11-18T02:31:47,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-18T02:31:47,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-18T02:31:47,544 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-18T02:31:47,544 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-18T02:31:47,546 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-11-18T02:31:48,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:48,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:49,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:49,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:50,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:50,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:51,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:51,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:52,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:52,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:53,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:53,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:53,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta after 68041ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor193.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T02:31:53,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor193.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T02:31:54,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:54,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:55,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:55,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:56,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:31:56,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:56,305 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T02:31:57,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:57,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T02:31:57,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-18T02:31:57,435 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-18T02:31:57,438 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C35037%2C1731897086367.1731897117437
2024-11-18T02:31:57,443 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T02:31:57,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T02:31:57,443 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T02:31:57,443 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T02:31:57,443 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T02:31:57,444 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897086749 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897117437
2024-11-18T02:31:57,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741833_1009 (size=5546)
2024-11-18T02:31:57,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741833_1009 (size=5546)
2024-11-18T02:31:57,447 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41915:41915),(127.0.0.1/127.0.0.1:37667:37667)]
2024-11-18T02:31:57,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T02:31:57,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T02:31:57,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-18T02:31:57,450 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-18T02:31:57,452 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-18T02:31:57,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-18T02:31:57,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35037 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-18T02:31:57,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.
2024-11-18T02:31:57,606 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 6ca9efd5eae150fd1b82f928e7e92eb7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-18T02:31:57,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/36cb371eaf43478dac3dada549d48978 is 1080, key is row0003/info:/1731897117436/Put/seqid=0
2024-11-18T02:31:57,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741840_1016 (size=6033)
2024-11-18T02:31:57,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741840_1016 (size=6033)
2024-11-18T02:31:57,615 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/36cb371eaf43478dac3dada549d48978
2024-11-18T02:31:57,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/36cb371eaf43478dac3dada549d48978 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/36cb371eaf43478dac3dada549d48978
2024-11-18T02:31:57,627 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/36cb371eaf43478dac3dada549d48978, entries=1, sequenceid=13, filesize=5.9 K
2024-11-18T02:31:57,628 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6ca9efd5eae150fd1b82f928e7e92eb7 in 23ms, sequenceid=13, compaction requested=true
2024-11-18T02:31:57,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 6ca9efd5eae150fd1b82f928e7e92eb7:
2024-11-18T02:31:57,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.
2024-11-18T02:31:57,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-18T02:31:57,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-18T02:31:57,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-18T02:31:57,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec
2024-11-18T02:31:57,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-11-18T02:31:58,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:58,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:59,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:31:59,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:00,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:00,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:01,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:01,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:02,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:02,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:03,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:03,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:04,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:04,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:05,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:05,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:06,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:06,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:07,223 INFO [master/c4730a2bacf8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T02:32:07,223 INFO [master/c4730a2bacf8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T02:32:07,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:07,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:07,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-18T02:32:07,494 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T02:32:07,494 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:32:07,496 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:32:07,496 DEBUG [Time-limited test {}] regionserver.HStore(1541): 6ca9efd5eae150fd1b82f928e7e92eb7/info is initiating minor compaction (all files) 2024-11-18T02:32:07,496 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:32:07,496 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:07,496 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 6ca9efd5eae150fd1b82f928e7e92eb7/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:32:07,496 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/10cdee4c9a684c4f957b770d57a96efc, hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/71fa69d9ad2d46a48f98fc0b19f06590, hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/36cb371eaf43478dac3dada549d48978] into tmpdir=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp, totalSize=17.7 K 2024-11-18T02:32:07,497 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 10cdee4c9a684c4f957b770d57a96efc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731897097271 2024-11-18T02:32:07,497 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 71fa69d9ad2d46a48f98fc0b19f06590, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731897107355 2024-11-18T02:32:07,497 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 36cb371eaf43478dac3dada549d48978, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731897117436 2024-11-18T02:32:07,509 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 6ca9efd5eae150fd1b82f928e7e92eb7#info#compaction#44 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:07,510 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/1245c6d4c20e48288f198aa0d42690d2 is 1080, key is row0001/info:/1731897097271/Put/seqid=0 2024-11-18T02:32:07,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741841_1017 (size=8296) 2024-11-18T02:32:07,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741841_1017 (size=8296) 2024-11-18T02:32:07,522 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/1245c6d4c20e48288f198aa0d42690d2 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/1245c6d4c20e48288f198aa0d42690d2 2024-11-18T02:32:07,529 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6ca9efd5eae150fd1b82f928e7e92eb7/info of 6ca9efd5eae150fd1b82f928e7e92eb7 into 1245c6d4c20e48288f198aa0d42690d2(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T02:32:07,529 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 6ca9efd5eae150fd1b82f928e7e92eb7: 2024-11-18T02:32:07,532 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C35037%2C1731897086367.1731897127532 2024-11-18T02:32:07,538 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:07,538 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:07,538 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:07,538 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:07,538 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:07,539 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897117437 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897127532 2024-11-18T02:32:07,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741839_1015 (size=2520) 2024-11-18T02:32:07,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741839_1015 (size=2520) 2024-11-18T02:32:07,541 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897086749 to 
hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/oldWALs/c4730a2bacf8%2C35037%2C1731897086367.1731897086749 2024-11-18T02:32:07,544 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41915:41915),(127.0.0.1/127.0.0.1:37667:37667)] 2024-11-18T02:32:07,545 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T02:32:07,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T02:32:07,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-18T02:32:07,548 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T02:32:07,549 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T02:32:07,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T02:32:07,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35037 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-18T02:32:07,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 
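A note on the compaction entries a few lines above: the ExploringCompactionPolicy selected all three ~5.9 K store files (18099 bytes total) after "considering 1 permutations with 1 in ratio". The "in ratio" test is commonly described as requiring that no file in a candidate set be larger than the configured ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other files in the set. The Java sketch below only illustrates that check with the file sizes taken from the log; it is not the HBase implementation.

import java.util.List;

// Illustration only of the "files in ratio" test used by ratio-based selection;
// the real policy is org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy.
public class InRatioSketch {

  // A candidate set is "in ratio" when no single file exceeds
  // ratio * (sum of the other files' sizes). Assumed default ratio: 1.2.
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three store files selected in the log: 6033 bytes each, 18099 bytes total.
    List<Long> candidate = List.of(6033L, 6033L, 6033L);
    System.out.println("in ratio: " + filesInRatio(candidate, 1.2)); // prints: in ratio: true
  }
}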
2024-11-18T02:32:07,702 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 6ca9efd5eae150fd1b82f928e7e92eb7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T02:32:07,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/0bd0a1af2fd4447dad555be38460044f is 1080, key is row0000/info:/1731897127531/Put/seqid=0 2024-11-18T02:32:07,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741843_1019 (size=6033) 2024-11-18T02:32:07,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741843_1019 (size=6033) 2024-11-18T02:32:07,712 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/0bd0a1af2fd4447dad555be38460044f 2024-11-18T02:32:07,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/0bd0a1af2fd4447dad555be38460044f as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/0bd0a1af2fd4447dad555be38460044f 2024-11-18T02:32:07,722 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/0bd0a1af2fd4447dad555be38460044f, entries=1, sequenceid=18, filesize=5.9 K 2024-11-18T02:32:07,723 INFO [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6ca9efd5eae150fd1b82f928e7e92eb7 in 21ms, sequenceid=18, compaction requested=false 2024-11-18T02:32:07,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 6ca9efd5eae150fd1b82f928e7e92eb7: 2024-11-18T02:32:07,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 
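The flush above is driven by a client request: "Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling" becomes a FlushTableProcedure (pid=13) on the master, which dispatches a FlushRegionProcedure (pid=14) to the region server, which writes the memstore to a .tmp hfile and commits it into info/. A minimal sketch of issuing such a flush through the public Admin API follows; the connection setup is an assumption for illustration, not taken from this test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: ask the master to flush a table, which is what produces the
// FlushTableProcedure -> FlushRegionProcedure chain seen in the log.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table name taken from the log entries above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}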
2024-11-18T02:32:07,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-18T02:32:07,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-18T02:32:07,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-18T02:32:07,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-18T02:32:07,730 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-18T02:32:08,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:08,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:09,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:09,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:10,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:10,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:11,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:11,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:12,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:12,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:12,537 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6ca9efd5eae150fd1b82f928e7e92eb7, had cached 0 bytes from a total of 14329 2024-11-18T02:32:13,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:13,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:14,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:14,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:15,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:15,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:16,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:16,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:17,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:17,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-18T02:32:17,554 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T02:32:17,556 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C35037%2C1731897086367.1731897137556 2024-11-18T02:32:17,562 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,562 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,562 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,562 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,562 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,563 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897127532 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897137556 2024-11-18T02:32:17,563 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41915:41915),(127.0.0.1/127.0.0.1:37667:37667)] 2024-11-18T02:32:17,563 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897127532 is not closed yet, will try archiving it next time 2024-11-18T02:32:17,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T02:32:17,563 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/WALs/c4730a2bacf8,35037,1731897086367/c4730a2bacf8%2C35037%2C1731897086367.1731897117437 to hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/oldWALs/c4730a2bacf8%2C35037%2C1731897086367.1731897117437 2024-11-18T02:32:17,564 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T02:32:17,564 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:32:17,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:32:17,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:32:17,564 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T02:32:17,564 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T02:32:17,564 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=913953815, stopped=false 2024-11-18T02:32:17,564 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c4730a2bacf8,41317,1731897086322 2024-11-18T02:32:17,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741842_1018 (size=2026) 2024-11-18T02:32:17,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741842_1018 (size=2026) 2024-11-18T02:32:17,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:32:17,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:32:17,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:17,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:17,566 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:32:17,566 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T02:32:17,567 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:32:17,567 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:32:17,567 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'c4730a2bacf8,35037,1731897086367' ***** 2024-11-18T02:32:17,567 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T02:32:17,567 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T02:32:17,567 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T02:32:17,567 INFO [RS:0;c4730a2bacf8:35037 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T02:32:17,567 INFO [RS:0;c4730a2bacf8:35037 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T02:32:17,567 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(3091): Received CLOSE for 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:32:17,567 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:32:17,568 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(959): stopping server c4730a2bacf8,35037,1731897086367 2024-11-18T02:32:17,568 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:32:17,568 INFO [RS:0;c4730a2bacf8:35037 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c4730a2bacf8:35037. 2024-11-18T02:32:17,568 DEBUG [RS:0;c4730a2bacf8:35037 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:32:17,568 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6ca9efd5eae150fd1b82f928e7e92eb7, disabling compactions & flushes 2024-11-18T02:32:17,568 DEBUG [RS:0;c4730a2bacf8:35037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:32:17,568 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 
2024-11-18T02:32:17,568 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:32:17,568 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T02:32:17,568 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T02:32:17,568 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. after waiting 0 ms 2024-11-18T02:32:17,568 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T02:32:17,568 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:32:17,568 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T02:32:17,568 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6ca9efd5eae150fd1b82f928e7e92eb7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T02:32:17,568 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:32:17,572 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T02:32:17,572 DEBUG [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1325): Online Regions={6ca9efd5eae150fd1b82f928e7e92eb7=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T02:32:17,572 DEBUG [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6ca9efd5eae150fd1b82f928e7e92eb7 2024-11-18T02:32:17,572 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:32:17,572 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:32:17,572 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:32:17,572 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:32:17,572 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:32:17,573 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-18T02:32:17,576 DEBUG 
[RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/1ef3c2721a3144af88cdebfd8c838198 is 1080, key is row0001/info:/1731897137555/Put/seqid=0 2024-11-18T02:32:17,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741845_1021 (size=6033) 2024-11-18T02:32:17,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741845_1021 (size=6033) 2024-11-18T02:32:17,584 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/1ef3c2721a3144af88cdebfd8c838198 2024-11-18T02:32:17,590 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/info/4303e464f06d4c7984177b0adfd48d44 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7./info:regioninfo/1731897087550/Put/seqid=0 2024-11-18T02:32:17,592 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/.tmp/info/1ef3c2721a3144af88cdebfd8c838198 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/1ef3c2721a3144af88cdebfd8c838198 2024-11-18T02:32:17,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741846_1022 (size=7308) 2024-11-18T02:32:17,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741846_1022 (size=7308) 2024-11-18T02:32:17,595 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/info/4303e464f06d4c7984177b0adfd48d44 2024-11-18T02:32:17,597 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/1ef3c2721a3144af88cdebfd8c838198, entries=1, sequenceid=22, filesize=5.9 K 2024-11-18T02:32:17,599 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6ca9efd5eae150fd1b82f928e7e92eb7 in 30ms, sequenceid=22, compaction requested=true 2024-11-18T02:32:17,599 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/10cdee4c9a684c4f957b770d57a96efc, hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/71fa69d9ad2d46a48f98fc0b19f06590, hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/36cb371eaf43478dac3dada549d48978] to archive 2024-11-18T02:32:17,600 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T02:32:17,602 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/10cdee4c9a684c4f957b770d57a96efc to hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/10cdee4c9a684c4f957b770d57a96efc 2024-11-18T02:32:17,603 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/71fa69d9ad2d46a48f98fc0b19f06590 to hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/71fa69d9ad2d46a48f98fc0b19f06590 2024-11-18T02:32:17,604 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/36cb371eaf43478dac3dada549d48978 to hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/info/36cb371eaf43478dac3dada549d48978 2024-11-18T02:32:17,604 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c4730a2bacf8:41317 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-18T02:32:17,604 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [10cdee4c9a684c4f957b770d57a96efc=6033, 71fa69d9ad2d46a48f98fc0b19f06590=6033, 36cb371eaf43478dac3dada549d48978=6033] 2024-11-18T02:32:17,612 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/6ca9efd5eae150fd1b82f928e7e92eb7/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-18T02:32:17,613 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 2024-11-18T02:32:17,613 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6ca9efd5eae150fd1b82f928e7e92eb7: Waiting for close lock at 1731897137568Running coprocessor pre-close hooks at 1731897137568Disabling compacts and flushes for region at 1731897137568Disabling writes for close at 1731897137568Obtaining lock to block concurrent updates at 1731897137568Preparing flush snapshotting stores in 6ca9efd5eae150fd1b82f928e7e92eb7 at 1731897137568Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731897137568Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. at 1731897137569 (+1 ms)Flushing 6ca9efd5eae150fd1b82f928e7e92eb7/info: creating writer at 1731897137569Flushing 6ca9efd5eae150fd1b82f928e7e92eb7/info: appending metadata at 1731897137576 (+7 ms)Flushing 6ca9efd5eae150fd1b82f928e7e92eb7/info: closing flushed file at 1731897137576Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31b59d91: reopening flushed file at 1731897137591 (+15 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6ca9efd5eae150fd1b82f928e7e92eb7 in 30ms, sequenceid=22, compaction requested=true at 1731897137599 (+8 ms)Writing region close event to WAL at 1731897137608 (+9 ms)Running coprocessor post-close hooks at 1731897137613 (+5 ms)Closed at 1731897137613 2024-11-18T02:32:17,613 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731897087196.6ca9efd5eae150fd1b82f928e7e92eb7. 
2024-11-18T02:32:17,621 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/ns/cf01c9a0cce64f18a1f9c94d183d6e5e is 43, key is default/ns:d/1731897087167/Put/seqid=0 2024-11-18T02:32:17,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741847_1023 (size=5153) 2024-11-18T02:32:17,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741847_1023 (size=5153) 2024-11-18T02:32:17,626 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/ns/cf01c9a0cce64f18a1f9c94d183d6e5e 2024-11-18T02:32:17,653 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/table/ba60a264ea35495fb2dfbf05f694fbd1 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731897087561/Put/seqid=0 2024-11-18T02:32:17,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741848_1024 (size=5508) 2024-11-18T02:32:17,657 INFO [regionserver/c4730a2bacf8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T02:32:17,657 INFO [regionserver/c4730a2bacf8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T02:32:17,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741848_1024 (size=5508) 2024-11-18T02:32:17,658 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/table/ba60a264ea35495fb2dfbf05f694fbd1 2024-11-18T02:32:17,663 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/info/4303e464f06d4c7984177b0adfd48d44 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/info/4303e464f06d4c7984177b0adfd48d44 2024-11-18T02:32:17,667 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/info/4303e464f06d4c7984177b0adfd48d44, entries=10, sequenceid=11, filesize=7.1 K 2024-11-18T02:32:17,668 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/ns/cf01c9a0cce64f18a1f9c94d183d6e5e as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/ns/cf01c9a0cce64f18a1f9c94d183d6e5e 2024-11-18T02:32:17,673 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/ns/cf01c9a0cce64f18a1f9c94d183d6e5e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T02:32:17,673 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/.tmp/table/ba60a264ea35495fb2dfbf05f694fbd1 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/table/ba60a264ea35495fb2dfbf05f694fbd1 2024-11-18T02:32:17,678 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/table/ba60a264ea35495fb2dfbf05f694fbd1, entries=2, sequenceid=11, filesize=5.4 K 2024-11-18T02:32:17,679 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 107ms, sequenceid=11, compaction requested=false 2024-11-18T02:32:17,683 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T02:32:17,684 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:32:17,684 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:32:17,684 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897137572Running coprocessor pre-close hooks at 1731897137572Disabling compacts and flushes for region at 1731897137572Disabling writes for close at 1731897137572Obtaining lock to block concurrent updates at 1731897137573 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731897137573Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731897137573Flushing stores of hbase:meta,,1.1588230740 at 1731897137573Flushing 1588230740/info: creating writer at 1731897137574 (+1 ms)Flushing 1588230740/info: appending metadata at 1731897137590 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731897137590Flushing 1588230740/ns: creating writer at 1731897137600 (+10 ms)Flushing 1588230740/ns: appending metadata at 1731897137621 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1731897137621Flushing 
1588230740/table: creating writer at 1731897137632 (+11 ms)Flushing 1588230740/table: appending metadata at 1731897137652 (+20 ms)Flushing 1588230740/table: closing flushed file at 1731897137652Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@485d5cfb: reopening flushed file at 1731897137662 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@157c43a5: reopening flushed file at 1731897137668 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@276b147b: reopening flushed file at 1731897137673 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 107ms, sequenceid=11, compaction requested=false at 1731897137679 (+6 ms)Writing region close event to WAL at 1731897137680 (+1 ms)Running coprocessor post-close hooks at 1731897137683 (+3 ms)Closed at 1731897137684 (+1 ms) 2024-11-18T02:32:17,684 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T02:32:17,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:32:17,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:32:17,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T02:32:17,772 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(976): stopping server c4730a2bacf8,35037,1731897086367; all regions closed. 
2024-11-18T02:32:17,773 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,773 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,773 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,773 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,773 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741834_1010 (size=3306) 2024-11-18T02:32:17,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741834_1010 (size=3306) 2024-11-18T02:32:17,778 DEBUG [RS:0;c4730a2bacf8:35037 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/oldWALs 2024-11-18T02:32:17,778 INFO [RS:0;c4730a2bacf8:35037 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C35037%2C1731897086367.meta:.meta(num 1731897087128) 2024-11-18T02:32:17,778 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,778 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,778 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,778 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,778 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741844_1020 (size=1252) 2024-11-18T02:32:17,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741844_1020 (size=1252) 2024-11-18T02:32:17,783 DEBUG [RS:0;c4730a2bacf8:35037 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/oldWALs 2024-11-18T02:32:17,783 INFO [RS:0;c4730a2bacf8:35037 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C35037%2C1731897086367:(num 1731897137556) 2024-11-18T02:32:17,783 DEBUG [RS:0;c4730a2bacf8:35037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:32:17,783 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:32:17,783 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:32:17,783 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.ChoreService(370): Chore service for: regionserver/c4730a2bacf8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T02:32:17,783 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:32:17,783 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T02:32:17,784 INFO [RS:0;c4730a2bacf8:35037 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35037 2024-11-18T02:32:17,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:32:17,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c4730a2bacf8,35037,1731897086367 2024-11-18T02:32:17,786 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:32:17,787 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c4730a2bacf8,35037,1731897086367] 2024-11-18T02:32:17,788 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c4730a2bacf8,35037,1731897086367 already deleted, retry=false 2024-11-18T02:32:17,788 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c4730a2bacf8,35037,1731897086367 expired; onlineServers=0 2024-11-18T02:32:17,788 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c4730a2bacf8,41317,1731897086322' ***** 2024-11-18T02:32:17,788 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T02:32:17,788 INFO [M:0;c4730a2bacf8:41317 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:32:17,788 INFO [M:0;c4730a2bacf8:41317 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:32:17,788 DEBUG [M:0;c4730a2bacf8:41317 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T02:32:17,788 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T02:32:17,788 DEBUG [M:0;c4730a2bacf8:41317 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T02:32:17,788 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897086525 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897086525,5,FailOnTimeoutGroup] 2024-11-18T02:32:17,788 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897086525 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897086525,5,FailOnTimeoutGroup] 2024-11-18T02:32:17,788 INFO [M:0;c4730a2bacf8:41317 {}] hbase.ChoreService(370): Chore service for: master/c4730a2bacf8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T02:32:17,789 INFO [M:0;c4730a2bacf8:41317 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:32:17,789 DEBUG [M:0;c4730a2bacf8:41317 {}] master.HMaster(1795): Stopping service threads 2024-11-18T02:32:17,789 INFO [M:0;c4730a2bacf8:41317 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T02:32:17,789 INFO [M:0;c4730a2bacf8:41317 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:32:17,789 INFO [M:0;c4730a2bacf8:41317 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T02:32:17,789 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T02:32:17,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T02:32:17,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:17,791 DEBUG [M:0;c4730a2bacf8:41317 {}] zookeeper.ZKUtil(347): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T02:32:17,791 WARN [M:0;c4730a2bacf8:41317 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T02:32:17,792 INFO [M:0;c4730a2bacf8:41317 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/.lastflushedseqids 2024-11-18T02:32:17,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741849_1025 (size=130) 2024-11-18T02:32:17,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741849_1025 (size=130) 2024-11-18T02:32:17,797 INFO [M:0;c4730a2bacf8:41317 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T02:32:17,797 INFO [M:0;c4730a2bacf8:41317 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T02:32:17,797 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:32:17,797 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:32:17,797 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:32:17,797 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:32:17,797 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:32:17,797 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.56 KB heapSize=54.94 KB 2024-11-18T02:32:17,813 DEBUG [M:0;c4730a2bacf8:41317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d926fb0ab8614ed898165627655ab21e is 82, key is hbase:meta,,1/info:regioninfo/1731897087151/Put/seqid=0 2024-11-18T02:32:17,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741850_1026 (size=5672) 2024-11-18T02:32:17,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741850_1026 (size=5672) 2024-11-18T02:32:17,818 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d926fb0ab8614ed898165627655ab21e 2024-11-18T02:32:17,838 DEBUG [M:0;c4730a2bacf8:41317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6d3c48aacfa348178bebd6ba1792dcc1 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731897087565/Put/seqid=0 2024-11-18T02:32:17,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741851_1027 (size=7819) 2024-11-18T02:32:17,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741851_1027 (size=7819) 2024-11-18T02:32:17,842 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6d3c48aacfa348178bebd6ba1792dcc1 2024-11-18T02:32:17,847 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6d3c48aacfa348178bebd6ba1792dcc1 2024-11-18T02:32:17,861 DEBUG [M:0;c4730a2bacf8:41317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/66b4436b723543f7b88cfea6978a9a53 is 69, key is c4730a2bacf8,35037,1731897086367/rs:state/1731897086604/Put/seqid=0 2024-11-18T02:32:17,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741852_1028 (size=5156) 2024-11-18T02:32:17,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741852_1028 (size=5156) 2024-11-18T02:32:17,865 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/66b4436b723543f7b88cfea6978a9a53 2024-11-18T02:32:17,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:32:17,887 INFO [RS:0;c4730a2bacf8:35037 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:32:17,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35037-0x10128ea3bf30001, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:32:17,887 INFO [RS:0;c4730a2bacf8:35037 {}] regionserver.HRegionServer(1031): Exiting; stopping=c4730a2bacf8,35037,1731897086367; zookeeper connection closed. 2024-11-18T02:32:17,887 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1047f176 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1047f176 2024-11-18T02:32:17,887 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T02:32:17,891 DEBUG [M:0;c4730a2bacf8:41317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c61e7a300ded493991258558e7c910e5 is 52, key is load_balancer_on/state:d/1731897087193/Put/seqid=0 2024-11-18T02:32:17,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741853_1029 (size=5056) 2024-11-18T02:32:17,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741853_1029 (size=5056) 2024-11-18T02:32:17,896 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c61e7a300ded493991258558e7c910e5 2024-11-18T02:32:17,901 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d926fb0ab8614ed898165627655ab21e as 
hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d926fb0ab8614ed898165627655ab21e 2024-11-18T02:32:17,905 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d926fb0ab8614ed898165627655ab21e, entries=8, sequenceid=121, filesize=5.5 K 2024-11-18T02:32:17,906 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6d3c48aacfa348178bebd6ba1792dcc1 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6d3c48aacfa348178bebd6ba1792dcc1 2024-11-18T02:32:17,909 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6d3c48aacfa348178bebd6ba1792dcc1 2024-11-18T02:32:17,909 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6d3c48aacfa348178bebd6ba1792dcc1, entries=14, sequenceid=121, filesize=7.6 K 2024-11-18T02:32:17,910 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/66b4436b723543f7b88cfea6978a9a53 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/66b4436b723543f7b88cfea6978a9a53 2024-11-18T02:32:17,914 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/66b4436b723543f7b88cfea6978a9a53, entries=1, sequenceid=121, filesize=5.0 K 2024-11-18T02:32:17,915 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c61e7a300ded493991258558e7c910e5 as hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c61e7a300ded493991258558e7c910e5 2024-11-18T02:32:17,919 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33253/user/jenkins/test-data/653e1c4c-73b3-2263-1692-dfc2edac9f52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c61e7a300ded493991258558e7c910e5, entries=1, sequenceid=121, filesize=4.9 K 2024-11-18T02:32:17,920 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.56 KB/44602, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=121, compaction requested=false 2024-11-18T02:32:17,921 INFO [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T02:32:17,921 DEBUG [M:0;c4730a2bacf8:41317 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897137797Disabling compacts and flushes for region at 1731897137797Disabling writes for close at 1731897137797Obtaining lock to block concurrent updates at 1731897137797Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731897137797Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44602, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1731897137798 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731897137798Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731897137798Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731897137813 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731897137813Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731897137823 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731897137837 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731897137837Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731897137847 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731897137860 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731897137860Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731897137869 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731897137890 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731897137890Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14481067: reopening flushed file at 1731897137900 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4811913c: reopening flushed file at 1731897137905 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b19e88a: reopening flushed file at 1731897137910 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57b1f94f: reopening flushed file at 1731897137914 (+4 ms)Finished flush of dataSize ~43.56 KB/44602, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=121, compaction requested=false at 1731897137920 (+6 ms)Writing region close event to WAL at 1731897137921 (+1 ms)Closed at 1731897137921 2024-11-18T02:32:17,921 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,921 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,922 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:32:17,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41447 is added to blk_1073741830_1006 (size=52999) 2024-11-18T02:32:17,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741830_1006 (size=52999) 2024-11-18T02:32:17,924 INFO [M:0;c4730a2bacf8:41317 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
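[editor's note] The flush sequence above writes each column family's output under a .tmp directory and then commits it with the "Committing ... as ..." step into the final store directory. A rough sketch of that write-to-temp-then-rename idiom using only the plain Hadoop FileSystem API follows; the class name and paths are hypothetical and this is an illustration of the pattern, not HBase's actual commit code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  // Write-to-temp-then-rename: the pattern behind the "Committing .tmp/... as ..." lines.
  static void commit(FileSystem fs, Path tmpFile, Path finalFile) throws java.io.IOException {
    if (!fs.exists(tmpFile)) {
      throw new java.io.FileNotFoundException("missing flushed file: " + tmpFile);
    }
    fs.mkdirs(finalFile.getParent());          // ensure the store directory exists
    if (!fs.rename(tmpFile, finalFile)) {      // a single namespace operation on HDFS
      throw new java.io.IOException("rename failed: " + tmpFile + " -> " + finalFile);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // assumes fs.defaultFS points at a test HDFS
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical paths, only to show the shape of the commit step.
    commit(fs, new Path("/store/.tmp/info/abc123"), new Path("/store/info/abc123"));
  }
}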
2024-11-18T02:32:17,924 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T02:32:17,924 INFO [M:0;c4730a2bacf8:41317 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41317 2024-11-18T02:32:17,924 INFO [M:0;c4730a2bacf8:41317 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:32:18,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:32:18,026 INFO [M:0;c4730a2bacf8:41317 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:32:18,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41317-0x10128ea3bf30000, quorum=127.0.0.1:58976, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:32:18,028 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f8e2aa3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:32:18,029 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@151a0943{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:32:18,029 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:32:18,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4957c8be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:32:18,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bfa82bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.log.dir/,STOPPED} 2024-11-18T02:32:18,030 WARN [BP-1868350782-172.17.0.2-1731897085549 heartbeating to localhost/127.0.0.1:33253 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:32:18,030 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:32:18,030 WARN [BP-1868350782-172.17.0.2-1731897085549 heartbeating to localhost/127.0.0.1:33253 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1868350782-172.17.0.2-1731897085549 (Datanode Uuid 208ea11f-ed99-4229-9cb7-ae25e5cb1b5a) service to localhost/127.0.0.1:33253 2024-11-18T02:32:18,030 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:32:18,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/data/data3/current/BP-1868350782-172.17.0.2-1731897085549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:32:18,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/data/data4/current/BP-1868350782-172.17.0.2-1731897085549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:32:18,031 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:32:18,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@423ae426{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:32:18,042 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b8edabe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:32:18,042 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:32:18,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ff483aa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:32:18,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30d9f702{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.log.dir/,STOPPED} 2024-11-18T02:32:18,044 WARN [BP-1868350782-172.17.0.2-1731897085549 heartbeating to localhost/127.0.0.1:33253 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:32:18,044 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:32:18,044 WARN [BP-1868350782-172.17.0.2-1731897085549 heartbeating to localhost/127.0.0.1:33253 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1868350782-172.17.0.2-1731897085549 (Datanode Uuid a536b8c4-0094-4746-ad09-bc98bd36bf3b) service to localhost/127.0.0.1:33253 2024-11-18T02:32:18,044 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:32:18,044 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/data/data1/current/BP-1868350782-172.17.0.2-1731897085549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:32:18,045 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/cluster_82ad1ee8-5eff-52dc-b135-602f2b070bf6/data/data2/current/BP-1868350782-172.17.0.2-1731897085549 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:32:18,045 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:32:18,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fb33a9d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:32:18,051 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a560185{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:32:18,051 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:32:18,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b4ea813{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:32:18,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ff27683{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.log.dir/,STOPPED} 2024-11-18T02:32:18,057 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T02:32:18,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T02:32:18,084 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33253 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33253 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33253 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: regionserver/c4730a2bacf8:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:33253 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33253 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33253 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33253 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33253 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=44 (was 102), ProcessCount=11 (was 11), AvailableMemoryMB=2865 (was 2924) 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=44, ProcessCount=11, AvailableMemoryMB=2865 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.log.dir so I do NOT create it in target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fd185ffb-0fe9-c0dd-e63f-b02e19742333/hadoop.tmp.dir so I do NOT create it in target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01, deleteOnExit=true 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/test.cache.data in system properties and HBase conf 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.log.dir in system properties and HBase conf 2024-11-18T02:32:18,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T02:32:18,093 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/nfs.dump.dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/java.io.tmpdir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T02:32:18,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T02:32:18,106 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:32:18,171 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:32:18,175 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:32:18,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:32:18,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:32:18,176 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:32:18,177 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:32:18,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4181d37d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:32:18,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2124b505{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:32:18,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3bc081d8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/java.io.tmpdir/jetty-localhost-41843-hadoop-hdfs-3_4_1-tests_jar-_-any-9356275366658379471/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:32:18,292 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63e2e387{HTTP/1.1, (http/1.1)}{localhost:41843} 2024-11-18T02:32:18,292 INFO [Time-limited test {}] server.Server(415): Started @236749ms 2024-11-18T02:32:18,305 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:32:18,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:18,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:18,377 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:32:18,380 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:32:18,381 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:32:18,381 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:32:18,381 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:32:18,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@412b5320{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:32:18,382 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3868302b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:32:18,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@719b1e37{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/java.io.tmpdir/jetty-localhost-38115-hadoop-hdfs-3_4_1-tests_jar-_-any-8333114281816844214/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:32:18,494 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34accf12{HTTP/1.1, (http/1.1)}{localhost:38115} 2024-11-18T02:32:18,494 INFO [Time-limited test {}] server.Server(415): Started @236951ms 2024-11-18T02:32:18,495 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:32:18,523 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:32:18,525 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:32:18,526 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:32:18,526 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:32:18,526 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:32:18,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1204fb24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:32:18,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e4bbe36{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:32:18,590 WARN [Thread-1951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/data/data1/current/BP-505935065-172.17.0.2-1731897138112/current, will proceed with Du for space computation calculation, 2024-11-18T02:32:18,590 WARN [Thread-1952 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/data/data2/current/BP-505935065-172.17.0.2-1731897138112/current, will proceed with Du for space computation calculation, 2024-11-18T02:32:18,606 WARN [Thread-1930 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:32:18,608 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc9158aea156e354a with lease ID 0x4c021f6bd0cc2104: Processing first storage report for DS-cb2a58d2-cbde-4850-8af8-853e216ae4db from datanode DatanodeRegistration(127.0.0.1:44657, datanodeUuid=b7dad9ef-5a9a-4db9-ba3b-9ed5cd025541, infoPort=44143, infoSecurePort=0, ipcPort=35673, storageInfo=lv=-57;cid=testClusterID;nsid=1242074003;c=1731897138112) 2024-11-18T02:32:18,608 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc9158aea156e354a with lease ID 0x4c021f6bd0cc2104: from storage DS-cb2a58d2-cbde-4850-8af8-853e216ae4db node DatanodeRegistration(127.0.0.1:44657, datanodeUuid=b7dad9ef-5a9a-4db9-ba3b-9ed5cd025541, infoPort=44143, infoSecurePort=0, ipcPort=35673, storageInfo=lv=-57;cid=testClusterID;nsid=1242074003;c=1731897138112), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:32:18,608 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc9158aea156e354a with lease ID 0x4c021f6bd0cc2104: Processing first storage report for DS-ed3b5aa3-8f6d-46ce-a3ca-0a0662107b9c from datanode DatanodeRegistration(127.0.0.1:44657, datanodeUuid=b7dad9ef-5a9a-4db9-ba3b-9ed5cd025541, infoPort=44143, infoSecurePort=0, ipcPort=35673, storageInfo=lv=-57;cid=testClusterID;nsid=1242074003;c=1731897138112) 2024-11-18T02:32:18,608 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc9158aea156e354a with lease ID 0x4c021f6bd0cc2104: from storage DS-ed3b5aa3-8f6d-46ce-a3ca-0a0662107b9c node DatanodeRegistration(127.0.0.1:44657, datanodeUuid=b7dad9ef-5a9a-4db9-ba3b-9ed5cd025541, infoPort=44143, infoSecurePort=0, ipcPort=35673, storageInfo=lv=-57;cid=testClusterID;nsid=1242074003;c=1731897138112), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:32:18,619 INFO [regionserver/c4730a2bacf8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:32:18,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4445ac53{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/java.io.tmpdir/jetty-localhost-43093-hadoop-hdfs-3_4_1-tests_jar-_-any-2678248966950352446/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:32:18,643 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64358886{HTTP/1.1, (http/1.1)}{localhost:43093} 2024-11-18T02:32:18,643 INFO [Time-limited test {}] server.Server(415): Started @237099ms 2024-11-18T02:32:18,644 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
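[editor's note] The two "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings a few lines above refer to WALs from an earlier minicluster (port 43001) whose DFS client had already been shut down when lease recovery ran, so they read as teardown noise rather than a failure of the cluster being started here. A minimal sketch of how that exception surfaces, assuming fs.defaultFS points at an HDFS NameNode; the path is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClosedFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // newInstance() bypasses the shared FileSystem cache, so close() affects only this handle.
    FileSystem fs = FileSystem.newInstance(conf);
    fs.close();
    try {
      fs.exists(new Path("/user/jenkins/some-wal"));  // any metadata call after close()
    } catch (java.io.IOException e) {
      // Against HDFS this reports "Filesystem closed", the same cause seen in the warnings above.
      System.out.println("expected: " + e.getMessage());
    }
  }
}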
2024-11-18T02:32:18,727 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/data/data3/current/BP-505935065-172.17.0.2-1731897138112/current, will proceed with Du for space computation calculation, 2024-11-18T02:32:18,727 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/data/data4/current/BP-505935065-172.17.0.2-1731897138112/current, will proceed with Du for space computation calculation, 2024-11-18T02:32:18,748 WARN [Thread-1966 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:32:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1dfd22403a3273b2 with lease ID 0x4c021f6bd0cc2105: Processing first storage report for DS-9ae53192-9aae-4622-a764-575236144ac3 from datanode DatanodeRegistration(127.0.0.1:41201, datanodeUuid=20b1e98a-2cf5-4f9e-83a9-8cf40e588edd, infoPort=38699, infoSecurePort=0, ipcPort=33573, storageInfo=lv=-57;cid=testClusterID;nsid=1242074003;c=1731897138112) 2024-11-18T02:32:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1dfd22403a3273b2 with lease ID 0x4c021f6bd0cc2105: from storage DS-9ae53192-9aae-4622-a764-575236144ac3 node DatanodeRegistration(127.0.0.1:41201, datanodeUuid=20b1e98a-2cf5-4f9e-83a9-8cf40e588edd, infoPort=38699, infoSecurePort=0, ipcPort=33573, storageInfo=lv=-57;cid=testClusterID;nsid=1242074003;c=1731897138112), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:32:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1dfd22403a3273b2 with lease ID 0x4c021f6bd0cc2105: Processing first storage report for DS-929b157e-9861-47bd-bfa5-2e3d51f43ee1 from datanode DatanodeRegistration(127.0.0.1:41201, datanodeUuid=20b1e98a-2cf5-4f9e-83a9-8cf40e588edd, infoPort=38699, infoSecurePort=0, ipcPort=33573, storageInfo=lv=-57;cid=testClusterID;nsid=1242074003;c=1731897138112) 2024-11-18T02:32:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1dfd22403a3273b2 with lease ID 0x4c021f6bd0cc2105: from storage DS-929b157e-9861-47bd-bfa5-2e3d51f43ee1 node DatanodeRegistration(127.0.0.1:41201, datanodeUuid=20b1e98a-2cf5-4f9e-83a9-8cf40e588edd, infoPort=38699, infoSecurePort=0, ipcPort=33573, storageInfo=lv=-57;cid=testClusterID;nsid=1242074003;c=1731897138112), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:32:18,769 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f 2024-11-18T02:32:18,772 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/zookeeper_0, clientPort=58446, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T02:32:18,773 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58446 2024-11-18T02:32:18,773 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:32:18,774 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:32:18,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:32:18,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:32:18,782 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264 with version=8 2024-11-18T02:32:18,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase-staging 2024-11-18T02:32:18,784 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:32:18,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:32:18,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:32:18,784 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:32:18,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:32:18,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:32:18,784 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T02:32:18,784 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:32:18,785 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33897 2024-11-18T02:32:18,786 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33897 connecting to ZooKeeper ensemble=127.0.0.1:58446 2024-11-18T02:32:18,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:338970x0, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:32:18,792 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33897-0x10128eb08e30000 connected 2024-11-18T02:32:18,806 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:32:18,807 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:32:18,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:32:18,809 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264, hbase.cluster.distributed=false 2024-11-18T02:32:18,810 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:32:18,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33897 2024-11-18T02:32:18,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33897 2024-11-18T02:32:18,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33897 2024-11-18T02:32:18,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33897 2024-11-18T02:32:18,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33897 2024-11-18T02:32:18,826 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:32:18,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:32:18,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:32:18,826 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:32:18,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:32:18,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:32:18,826 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T02:32:18,826 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:32:18,827 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40979 2024-11-18T02:32:18,828 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40979 connecting to ZooKeeper ensemble=127.0.0.1:58446 2024-11-18T02:32:18,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:32:18,830 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:32:18,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:409790x0, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:32:18,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:32:18,834 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40979-0x10128eb08e30001 connected 2024-11-18T02:32:18,834 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T02:32:18,837 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T02:32:18,837 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T02:32:18,838 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:32:18,838 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40979 2024-11-18T02:32:18,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40979 2024-11-18T02:32:18,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40979 2024-11-18T02:32:18,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40979 2024-11-18T02:32:18,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40979 
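The ZKUtil(113) entries above show HBase arming watches on znodes that do not yet exist (/hbase/running, /hbase/acl, /hbase/master). A minimal sketch of that pattern with the stock ZooKeeper client, assuming only the ensemble address logged above (127.0.0.1:58446); the class name and session timeout are illustrative and this is not HBase's internal ZKUtil implementation:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class WatchMissingZNodeSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the test ensemble shown in the log (127.0.0.1:58446);
    // the 30000 ms session timeout is an assumption for the sketch.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58446", 30000,
        (WatchedEvent event) -> System.out.println("event: " + event));
    // exists() registers a watch even when the znode is absent, so the
    // caller is notified later when /hbase/running is created.
    if (zk.exists("/hbase/running", true) == null) {
      System.out.println("znode not there yet; watch is armed");
    }
    zk.close();
  }
}

exists() both answers whether the znode is present and leaves a one-shot watch behind, which is why the processes in this log can safely watch /hbase/running before the active master has created it.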
2024-11-18T02:32:18,851 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c4730a2bacf8:33897 2024-11-18T02:32:18,852 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:18,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:32:18,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:32:18,855 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:18,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T02:32:18,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,857 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T02:32:18,857 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c4730a2bacf8,33897,1731897138784 from backup master directory 2024-11-18T02:32:18,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:18,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:32:18,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:32:18,858 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T02:32:18,858 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:18,862 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/hbase.id] with ID: 9b484fc0-c68f-4e66-a127-582d3f25047b 2024-11-18T02:32:18,862 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/.tmp/hbase.id 2024-11-18T02:32:18,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:32:18,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:32:18,870 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/.tmp/hbase.id]:[hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/hbase.id] 2024-11-18T02:32:18,881 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:32:18,881 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T02:32:18,882 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
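The FSUtils entries above (create the cluster ID file, write it to a temporary location under .tmp, then move it to its target) describe a write-temp-then-rename pattern so readers never observe a partially written hbase.id. A minimal sketch of that pattern with the plain Hadoop FileSystem API, assuming an HDFS client pointed at the namenode address from the log; the paths are abbreviated from the logged ones, the class name is made up, and the writeUTF payload is purely illustrative (it is not HBase's actual hbase.id serialization):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Namenode address taken from the log; paths shortened for the sketch.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38321"), conf);
    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/hbase.id");
    // Write the id to a temporary file first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF("9b484fc0-c68f-4e66-a127-582d3f25047b");
    }
    // ...then move it into place so readers only ever see a complete file.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}

rename() is atomic on HDFS, which is what makes the temp-then-move sequence safe here.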
2024-11-18T02:32:18,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:32:18,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:32:18,890 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:32:18,891 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T02:32:18,891 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:32:18,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:32:18,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:32:18,899 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store 2024-11-18T02:32:18,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:32:18,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:32:18,905 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:32:18,905 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:32:18,905 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:32:18,905 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:32:18,905 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:32:18,905 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:32:18,905 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T02:32:18,905 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897138905Disabling compacts and flushes for region at 1731897138905Disabling writes for close at 1731897138905Writing region close event to WAL at 1731897138905Closed at 1731897138905 2024-11-18T02:32:18,906 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/.initializing 2024-11-18T02:32:18,906 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/WALs/c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:18,908 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C33897%2C1731897138784, suffix=, logDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/WALs/c4730a2bacf8,33897,1731897138784, archiveDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/oldWALs, maxLogs=10 2024-11-18T02:32:18,909 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33897%2C1731897138784.1731897138909 2024-11-18T02:32:18,913 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/WALs/c4730a2bacf8,33897,1731897138784/c4730a2bacf8%2C33897%2C1731897138784.1731897138909 2024-11-18T02:32:18,913 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44143:44143),(127.0.0.1/127.0.0.1:38699:38699)] 2024-11-18T02:32:18,914 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:32:18,914 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:32:18,914 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,914 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T02:32:18,917 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:18,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T02:32:18,918 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:32:18,919 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T02:32:18,920 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:32:18,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,921 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T02:32:18,921 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,921 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:32:18,922 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,922 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,922 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,924 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,924 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,924 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T02:32:18,925 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:32:18,927 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:32:18,927 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772892, jitterRate=-0.01721784472465515}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T02:32:18,928 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731897138914Initializing all the Stores at 1731897138915 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897138915Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897138915Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897138915Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897138915Cleaning up temporary data from old regions at 1731897138924 (+9 ms)Region opened successfully at 1731897138928 (+4 ms) 2024-11-18T02:32:18,928 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T02:32:18,931 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@307d20cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:32:18,931 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T02:32:18,932 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T02:32:18,932 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T02:32:18,932 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T02:32:18,932 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T02:32:18,932 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T02:32:18,933 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T02:32:18,934 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T02:32:18,935 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T02:32:18,938 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T02:32:18,938 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T02:32:18,939 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T02:32:18,940 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T02:32:18,940 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T02:32:18,941 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T02:32:18,942 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T02:32:18,943 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T02:32:18,944 DEBUG 
[master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T02:32:18,945 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T02:32:18,951 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T02:32:18,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:32:18,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:32:18,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,953 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c4730a2bacf8,33897,1731897138784, sessionid=0x10128eb08e30000, setting cluster-up flag (Was=false) 2024-11-18T02:32:18,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,961 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T02:32:18,962 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:18,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:18,970 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T02:32:18,971 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:18,972 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T02:32:18,973 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T02:32:18,974 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T02:32:18,974 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T02:32:18,974 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c4730a2bacf8,33897,1731897138784 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T02:32:18,975 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:32:18,975 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:32:18,975 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:32:18,975 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:32:18,975 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c4730a2bacf8:0, corePoolSize=10, maxPoolSize=10 2024-11-18T02:32:18,975 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:18,975 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:32:18,975 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T02:32:18,977 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731897168977 2024-11-18T02:32:18,977 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T02:32:18,977 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T02:32:18,978 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T02:32:18,978 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T02:32:18,978 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T02:32:18,978 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T02:32:18,978 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:18,978 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:32:18,978 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T02:32:18,978 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T02:32:18,978 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T02:32:18,978 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T02:32:18,979 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T02:32:18,979 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T02:32:18,979 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897138979,5,FailOnTimeoutGroup] 2024-11-18T02:32:18,979 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,979 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T02:32:18,979 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897138979,5,FailOnTimeoutGroup] 2024-11-18T02:32:18,979 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:18,980 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T02:32:18,980 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:18,980 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
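The hbase:meta descriptor logged above lists four column families (info, ns, rep_barrier, table) with attributes such as VERSIONS, IN_MEMORY, BLOOMFILTER, BLOCKSIZE and DATA_BLOCK_ENCODING. Purely as a hedged illustration of what those attributes mean in client-API terms (hbase:meta itself is created internally by the master, not through user code), one family with the logged 'info' settings could be expressed like this; the table name is hypothetical:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // 'info' family as logged: VERSIONS=3, IN_MEMORY=true, BLOCKSIZE=8192,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_meta_like"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .build();
    System.out.println(td);
  }
}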
2024-11-18T02:32:18,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:32:18,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:32:18,987 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T02:32:18,987 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264 2024-11-18T02:32:18,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:32:18,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:32:18,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:32:18,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:32:18,995 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:32:18,995 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:18,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:32:18,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:32:18,996 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:18,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:32:18,997 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:32:18,997 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:18,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:32:18,999 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:32:18,999 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:18,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:18,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:32:19,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740 2024-11-18T02:32:19,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740 2024-11-18T02:32:19,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:32:19,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:32:19,001 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
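The FlushLargeStoresPolicy(65) messages here and at the earlier master:store open record the fallback when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the per-family lower bound becomes region.getMemStoreFlushHeapSize divided by the number of column families. The logged numbers are consistent with that:

    master:store  134217728 / 4 families (info, proc, rs, state)            = 33554432  (the "32.0 M" shown; flushSizeLowerBound=33554432)
    hbase:meta    16.0 M per family over 4 families (info, ns, rep_barrier, table)  ->  flushSizeLowerBound=16777216

Both lower-bound values appear verbatim in the surrounding FlushLargeStoresPolicy output.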
2024-11-18T02:32:19,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:32:19,004 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:32:19,004 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847585, jitterRate=0.0777604728937149}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:32:19,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731897138992Initializing all the Stores at 1731897138993 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897138993Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897138993Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897138993Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897138993Cleaning up temporary data from old regions at 1731897139001 (+8 ms)Region opened successfully at 1731897139005 (+4 ms) 2024-11-18T02:32:19,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:32:19,005 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:32:19,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:32:19,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:32:19,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:32:19,005 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:32:19,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897139005Disabling compacts and flushes for region at 1731897139005Disabling writes for close at 1731897139005Writing region close 
event to WAL at 1731897139005Closed at 1731897139005 2024-11-18T02:32:19,006 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:32:19,006 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T02:32:19,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T02:32:19,008 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:32:19,009 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T02:32:19,041 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(746): ClusterId : 9b484fc0-c68f-4e66-a127-582d3f25047b 2024-11-18T02:32:19,041 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T02:32:19,044 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T02:32:19,044 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T02:32:19,046 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T02:32:19,046 DEBUG [RS:0;c4730a2bacf8:40979 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a71b02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:32:19,058 DEBUG [RS:0;c4730a2bacf8:40979 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c4730a2bacf8:40979 2024-11-18T02:32:19,058 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T02:32:19,058 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T02:32:19,058 DEBUG [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T02:32:19,058 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(2659): reportForDuty to master=c4730a2bacf8,33897,1731897138784 with port=40979, startcode=1731897138825 2024-11-18T02:32:19,059 DEBUG [RS:0;c4730a2bacf8:40979 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T02:32:19,060 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57035, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T02:32:19,061 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33897 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,061 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33897 {}] master.ServerManager(517): Registering regionserver=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,063 DEBUG [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264 2024-11-18T02:32:19,063 DEBUG [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38321 2024-11-18T02:32:19,063 DEBUG [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T02:32:19,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:32:19,065 DEBUG [RS:0;c4730a2bacf8:40979 {}] zookeeper.ZKUtil(111): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,065 WARN [RS:0;c4730a2bacf8:40979 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:32:19,065 INFO [RS:0;c4730a2bacf8:40979 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:32:19,065 DEBUG [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,065 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c4730a2bacf8,40979,1731897138825] 2024-11-18T02:32:19,068 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T02:32:19,070 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T02:32:19,070 INFO [RS:0;c4730a2bacf8:40979 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:32:19,070 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T02:32:19,071 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T02:32:19,071 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T02:32:19,072 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:32:19,072 DEBUG [RS:0;c4730a2bacf8:40979 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:32:19,072 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T02:32:19,072 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,072 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,072 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,072 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,072 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,40979,1731897138825-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:32:19,087 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T02:32:19,087 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,40979,1731897138825-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,087 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,087 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.Replication(171): c4730a2bacf8,40979,1731897138825 started 2024-11-18T02:32:19,101 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,101 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1482): Serving as c4730a2bacf8,40979,1731897138825, RpcServer on c4730a2bacf8/172.17.0.2:40979, sessionid=0x10128eb08e30001 2024-11-18T02:32:19,101 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T02:32:19,101 DEBUG [RS:0;c4730a2bacf8:40979 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,101 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,40979,1731897138825' 2024-11-18T02:32:19,101 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T02:32:19,101 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T02:32:19,102 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T02:32:19,102 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T02:32:19,102 DEBUG [RS:0;c4730a2bacf8:40979 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,102 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,40979,1731897138825' 2024-11-18T02:32:19,102 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T02:32:19,102 DEBUG 
[RS:0;c4730a2bacf8:40979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T02:32:19,102 DEBUG [RS:0;c4730a2bacf8:40979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T02:32:19,102 INFO [RS:0;c4730a2bacf8:40979 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T02:32:19,102 INFO [RS:0;c4730a2bacf8:40979 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T02:32:19,159 WARN [c4730a2bacf8:33897 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T02:32:19,204 INFO [RS:0;c4730a2bacf8:40979 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C40979%2C1731897138825, suffix=, logDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825, archiveDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/oldWALs, maxLogs=32 2024-11-18T02:32:19,205 INFO [RS:0;c4730a2bacf8:40979 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C40979%2C1731897138825.1731897139204 2024-11-18T02:32:19,210 INFO [RS:0;c4730a2bacf8:40979 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897139204 2024-11-18T02:32:19,211 DEBUG [RS:0;c4730a2bacf8:40979 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44143:44143),(127.0.0.1/127.0.0.1:38699:38699)] 2024-11-18T02:32:19,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:19,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:19,409 DEBUG [c4730a2bacf8:33897 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T02:32:19,410 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,411 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,40979,1731897138825, state=OPENING 2024-11-18T02:32:19,413 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T02:32:19,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:19,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:32:19,416 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:32:19,416 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:32:19,416 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:32:19,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,40979,1731897138825}] 2024-11-18T02:32:19,569 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T02:32:19,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48601, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T02:32:19,574 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T02:32:19,574 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:32:19,576 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C40979%2C1731897138825.meta, suffix=.meta, logDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825, archiveDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/oldWALs, maxLogs=32 2024-11-18T02:32:19,576 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C40979%2C1731897138825.meta.1731897139576.meta 2024-11-18T02:32:19,581 INFO 
[RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.meta.1731897139576.meta 2024-11-18T02:32:19,581 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38699:38699),(127.0.0.1/127.0.0.1:44143:44143)] 2024-11-18T02:32:19,582 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:32:19,582 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T02:32:19,582 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T02:32:19,582 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-18T02:32:19,582 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T02:32:19,582 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:32:19,583 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T02:32:19,583 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T02:32:19,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:32:19,585 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:32:19,585 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:19,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:19,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:32:19,586 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:32:19,586 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:19,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:19,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:32:19,587 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:32:19,587 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:19,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:19,587 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:32:19,588 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:32:19,588 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:19,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:32:19,588 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:32:19,589 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740 2024-11-18T02:32:19,590 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740 2024-11-18T02:32:19,591 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:32:19,591 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:32:19,591 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T02:32:19,592 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:32:19,593 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775672, jitterRate=-0.013682365417480469}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:32:19,593 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T02:32:19,594 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731897139583Writing region info on filesystem at 1731897139583Initializing all the Stores at 1731897139583Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897139583Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897139584 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897139584Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897139584Cleaning up temporary data from old regions at 1731897139591 (+7 ms)Running coprocessor post-open hooks at 1731897139593 (+2 ms)Region opened successfully at 1731897139593 2024-11-18T02:32:19,595 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731897139569 2024-11-18T02:32:19,597 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T02:32:19,597 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T02:32:19,598 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,599 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,40979,1731897138825, state=OPEN 2024-11-18T02:32:19,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:32:19,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:32:19,603 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,603 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:32:19,603 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:32:19,606 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T02:32:19,606 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,40979,1731897138825 in 187 msec 2024-11-18T02:32:19,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T02:32:19,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 599 msec 2024-11-18T02:32:19,609 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:32:19,609 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T02:32:19,610 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:32:19,610 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,40979,1731897138825, seqNum=-1] 2024-11-18T02:32:19,611 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:32:19,612 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51201, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:32:19,617 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 643 msec 2024-11-18T02:32:19,617 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731897139617, completionTime=-1 2024-11-18T02:32:19,617 INFO 
[master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T02:32:19,617 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T02:32:19,618 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T02:32:19,618 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731897199618 2024-11-18T02:32:19,619 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731897259619 2024-11-18T02:32:19,619 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-18T02:32:19,619 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33897,1731897138784-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,619 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33897,1731897138784-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,619 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33897,1731897138784-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,619 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c4730a2bacf8:33897, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,619 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,619 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,620 DEBUG [master/c4730a2bacf8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T02:32:19,622 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.763sec 2024-11-18T02:32:19,622 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T02:32:19,622 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T02:32:19,622 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T02:32:19,622 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-18T02:32:19,622 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T02:32:19,622 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33897,1731897138784-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:32:19,623 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33897,1731897138784-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T02:32:19,625 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T02:32:19,625 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T02:32:19,625 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33897,1731897138784-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:32:19,641 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@552cf870, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:32:19,641 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c4730a2bacf8,33897,-1 for getting cluster id 2024-11-18T02:32:19,641 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T02:32:19,642 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9b484fc0-c68f-4e66-a127-582d3f25047b' 2024-11-18T02:32:19,643 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T02:32:19,643 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9b484fc0-c68f-4e66-a127-582d3f25047b" 2024-11-18T02:32:19,643 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ba621a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:32:19,643 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c4730a2bacf8,33897,-1] 2024-11-18T02:32:19,643 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T02:32:19,643 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:32:19,644 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44658, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T02:32:19,645 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d19a4cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:32:19,645 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:32:19,646 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,40979,1731897138825, seqNum=-1] 2024-11-18T02:32:19,646 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:32:19,647 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39902, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:32:19,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:19,649 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:32:19,651 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T02:32:19,652 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T02:32:19,652 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is c4730a2bacf8,33897,1731897138784 2024-11-18T02:32:19,652 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@34648aa 2024-11-18T02:32:19,652 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T02:32:19,653 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44664, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T02:32:19,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33897 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T02:32:19,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33897 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T02:32:19,654 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33897 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:32:19,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33897 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-18T02:32:19,656 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T02:32:19,656 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:19,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33897 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-18T02:32:19,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T02:32:19,657 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T02:32:19,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741835_1011 (size=381) 2024-11-18T02:32:19,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741835_1011 (size=381) 2024-11-18T02:32:19,665 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bd839c3b8493b1fa541c64e468513ae0, NAME => 'TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264 2024-11-18T02:32:19,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741836_1012 (size=64) 2024-11-18T02:32:19,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741836_1012 (size=64) 2024-11-18T02:32:19,672 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:32:19,672 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing bd839c3b8493b1fa541c64e468513ae0, disabling compactions & flushes 2024-11-18T02:32:19,672 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:19,672 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:19,673 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. after waiting 0 ms 2024-11-18T02:32:19,673 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:19,673 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:19,673 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for bd839c3b8493b1fa541c64e468513ae0: Waiting for close lock at 1731897139672Disabling compacts and flushes for region at 1731897139672Disabling writes for close at 1731897139673 (+1 ms)Writing region close event to WAL at 1731897139673Closed at 1731897139673 2024-11-18T02:32:19,674 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T02:32:19,674 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731897139674"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731897139674"}]},"ts":"1731897139674"} 2024-11-18T02:32:19,676 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
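For reference, the create request logged above (a single 'info' family, one version, ROW bloom filter, 64 KB blocks, no compression) corresponds roughly to the following Admin API call. This is a hedged reconstruction from the logged descriptor, not the test's own code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged reconstruction of the create request logged above; class and method names are made up.
    public final class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)                              // VERSIONS => '1'
                        .setBloomFilterType(BloomType.ROW)              // BLOOMFILTER => 'ROW'
                        .setBlocksize(65536)                            // BLOCKSIZE => 64 KB
                        .setCompressionType(Compression.Algorithm.NONE) // COMPRESSION => 'NONE'
                        .build())
                    .build());
            }
        }
    }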
2024-11-18T02:32:19,677 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T02:32:19,677 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731897139677"}]},"ts":"1731897139677"} 2024-11-18T02:32:19,679 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-18T02:32:19,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, ASSIGN}] 2024-11-18T02:32:19,680 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, ASSIGN 2024-11-18T02:32:19,681 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, ASSIGN; state=OFFLINE, location=c4730a2bacf8,40979,1731897138825; forceNewPlan=false, retain=false 2024-11-18T02:32:19,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bd839c3b8493b1fa541c64e468513ae0, regionState=OPENING, regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:19,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, ASSIGN because future has completed 2024-11-18T02:32:19,835 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd839c3b8493b1fa541c64e468513ae0, server=c4730a2bacf8,40979,1731897138825}] 2024-11-18T02:32:19,991 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 
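While the CreateTableProcedure above fans out into the assign/open subprocedures, a client normally just polls until every region of the new table is open. A minimal sketch of that wait (hypothetical helper, using the standard Admin call for this situation):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // Hedged sketch: block until all regions of the table are deployed on some region server.
    public final class WaitForTableSketch {
        static void waitUntilAvailable(Admin admin, TableName table) throws Exception {
            while (!admin.isTableAvailable(table)) {
                Thread.sleep(100); // the master is still running the assign/open procedures
            }
        }
    }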
2024-11-18T02:32:19,991 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bd839c3b8493b1fa541c64e468513ae0, NAME => 'TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:32:19,991 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,991 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:32:19,991 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,992 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,993 INFO [StoreOpener-bd839c3b8493b1fa541c64e468513ae0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,994 INFO [StoreOpener-bd839c3b8493b1fa541c64e468513ae0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bd839c3b8493b1fa541c64e468513ae0 columnFamilyName info 2024-11-18T02:32:19,994 DEBUG [StoreOpener-bd839c3b8493b1fa541c64e468513ae0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:19,995 INFO [StoreOpener-bd839c3b8493b1fa541c64e468513ae0-1 {}] regionserver.HStore(327): Store=bd839c3b8493b1fa541c64e468513ae0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:32:19,995 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,995 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,996 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,996 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,996 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,997 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:19,999 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:32:19,999 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bd839c3b8493b1fa541c64e468513ae0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854671, jitterRate=0.08677056431770325}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T02:32:20,000 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:20,000 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bd839c3b8493b1fa541c64e468513ae0: Running coprocessor pre-open hook at 1731897139992Writing region info on filesystem at 1731897139992Initializing all the Stores at 1731897139992Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897139992Cleaning up temporary data from old regions at 1731897139996 (+4 ms)Running coprocessor post-open hooks at 1731897140000 (+4 ms)Region opened successfully at 1731897140000 2024-11-18T02:32:20,001 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., pid=6, masterSystemTime=1731897139987 2024-11-18T02:32:20,003 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 
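The open journal above reports a SteppingSplitPolicy with initialSize=16384 (twice the 8192-byte flush size) and a jittered desiredMaxFileSize of 854671 bytes (786432 plus ~8.7% jitter). The practical effect, as far as these logged values indicate, is that the split threshold steps from the small initial size to the full jittered maximum once the table has more than one region. A sketch of that rule, derived from the values logged here rather than copied from HBase source:

    // Hedged sketch of the size check implied by the logged policy values: with a single
    // region the small initialSize applies, so the first split happens quickly; once the
    // table has more regions, the jittered desiredMaxFileSize (~854671 bytes here) is used.
    public final class SteppingSplitSizeSketch {
        static long sizeToCheck(int regionsOfTableOnThisServer, long initialSize, long desiredMaxFileSize) {
            return regionsOfTableOnThisServer == 1 ? initialSize : desiredMaxFileSize;
        }

        public static void main(String[] args) {
            System.out.println(sizeToCheck(1, 16384L, 854671L)); // 16384  -> first split after ~16 KB of store files
            System.out.println(sizeToCheck(2, 16384L, 854671L)); // 854671 -> later splits wait for the full size
        }
    }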
2024-11-18T02:32:20,003 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:20,004 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bd839c3b8493b1fa541c64e468513ae0, regionState=OPEN, openSeqNum=2, regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:20,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd839c3b8493b1fa541c64e468513ae0, server=c4730a2bacf8,40979,1731897138825 because future has completed 2024-11-18T02:32:20,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T02:32:20,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bd839c3b8493b1fa541c64e468513ae0, server=c4730a2bacf8,40979,1731897138825 in 172 msec 2024-11-18T02:32:20,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T02:32:20,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, ASSIGN in 330 msec 2024-11-18T02:32:20,013 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T02:32:20,013 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731897140013"}]},"ts":"1731897140013"} 2024-11-18T02:32:20,015 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-18T02:32:20,016 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T02:32:20,018 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 362 msec 2024-11-18T02:32:20,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:20,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:21,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:21,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:22,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:22,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:22,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:22,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,136 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T02:32:23,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:23,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:23,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:24,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:24,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:25,068 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T02:32:25,069 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-18T02:32:25,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:25,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:26,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:26,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:27,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:27,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T02:32:27,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-18T02:32:27,769 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-18T02:32:27,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-18T02:32:27,770 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-18T02:32:27,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-11-18T02:32:27,770 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-11-18T02:32:28,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:28,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:29,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:29,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-18T02:32:29,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-18T02:32:29,694 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-18T02:32:29,694 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling,, for max=2147483647 with caching=100
2024-11-18T02:32:29,697 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-18T02:32:29,697 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.
2024-11-18T02:32:29,699 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., hostname=c4730a2bacf8,40979,1731897138825, seqNum=2]
2024-11-18T02:32:29,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on bd839c3b8493b1fa541c64e468513ae0
2024-11-18T02:32:29,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd839c3b8493b1fa541c64e468513ae0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-18T02:32:29,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/e6b237b32730424b93aceb6f75b29711 is 1080, key is row0001/info:/1731897149700/Put/seqid=0
2024-11-18T02:32:29,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741837_1013 (size=12509)
2024-11-18T02:32:29,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741837_1013 (size=12509)
2024-11-18T02:32:29,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/e6b237b32730424b93aceb6f75b29711
2024-11-18T02:32:29,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/e6b237b32730424b93aceb6f75b29711 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e6b237b32730424b93aceb6f75b29711
2024-11-18T02:32:29,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e6b237b32730424b93aceb6f75b29711, entries=7, sequenceid=11, filesize=12.2 K
2024-11-18T02:32:29,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for bd839c3b8493b1fa541c64e468513ae0 in 37ms, sequenceid=11, compaction requested=false
2024-11-18T02:32:29,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd839c3b8493b1fa541c64e468513ae0:
2024-11-18T02:32:29,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on bd839c3b8493b1fa541c64e468513ae0
2024-11-18T02:32:29,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd839c3b8493b1fa541c64e468513ae0 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-18T02:32:29,753 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/f060591dbe1b4837951413486650d946 is 1080, key is row0008/info:/1731897149712/Put/seqid=0
2024-11-18T02:32:29,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741838_1014 (size=29761)
2024-11-18T02:32:29,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741838_1014 (size=29761)
2024-11-18T02:32:29,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/f060591dbe1b4837951413486650d946
2024-11-18T02:32:29,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/f060591dbe1b4837951413486650d946 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946
2024-11-18T02:32:29,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946, entries=23, sequenceid=37, filesize=29.1 K
2024-11-18T02:32:29,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for bd839c3b8493b1fa541c64e468513ae0 in 24ms, sequenceid=37, compaction requested=false
2024-11-18T02:32:29,773 DEBUG [MemStoreFlusher.0 {}]
regionserver.HRegion(2603): Flush status journal for bd839c3b8493b1fa541c64e468513ae0: 2024-11-18T02:32:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-18T02:32:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946 because midkey is the same as first or last row 2024-11-18T02:32:30,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:30,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:31,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:31,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:31,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:31,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd839c3b8493b1fa541c64e468513ae0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T02:32:31,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/ef6bc1a6b3ce4e4280b305802cc30345 is 1080, key is row0031/info:/1731897149750/Put/seqid=0 2024-11-18T02:32:31,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741839_1015 (size=12509) 2024-11-18T02:32:31,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741839_1015 (size=12509) 2024-11-18T02:32:31,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/ef6bc1a6b3ce4e4280b305802cc30345 2024-11-18T02:32:31,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/ef6bc1a6b3ce4e4280b305802cc30345 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/ef6bc1a6b3ce4e4280b305802cc30345 2024-11-18T02:32:31,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/ef6bc1a6b3ce4e4280b305802cc30345, entries=7, sequenceid=47, filesize=12.2 K 2024-11-18T02:32:31,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for bd839c3b8493b1fa541c64e468513ae0 in 21ms, sequenceid=47, compaction requested=true 2024-11-18T02:32:31,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd839c3b8493b1fa541c64e468513ae0: 2024-11-18T02:32:31,783 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-18T02:32:31,783 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:31,784 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946 because midkey is the same as first or last row 2024-11-18T02:32:31,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bd839c3b8493b1fa541c64e468513ae0:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-18T02:32:31,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:31,784 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:32:31,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:31,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd839c3b8493b1fa541c64e468513ae0 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-18T02:32:31,785 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:32:31,786 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): bd839c3b8493b1fa541c64e468513ae0/info is initiating minor compaction (all files) 2024-11-18T02:32:31,786 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bd839c3b8493b1fa541c64e468513ae0/info in TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:31,786 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e6b237b32730424b93aceb6f75b29711, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/ef6bc1a6b3ce4e4280b305802cc30345] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp, totalSize=53.5 K 2024-11-18T02:32:31,786 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting e6b237b32730424b93aceb6f75b29711, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731897149700 2024-11-18T02:32:31,787 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting f060591dbe1b4837951413486650d946, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731897149712 2024-11-18T02:32:31,787 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting ef6bc1a6b3ce4e4280b305802cc30345, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731897149750 2024-11-18T02:32:31,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/e09663a7f6c8419bbf276d0e2a162878 is 1080, key is row0038/info:/1731897151763/Put/seqid=0 
2024-11-18T02:32:31,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741840_1016 (size=20064) 2024-11-18T02:32:31,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741840_1016 (size=20064) 2024-11-18T02:32:31,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/e09663a7f6c8419bbf276d0e2a162878 2024-11-18T02:32:31,801 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bd839c3b8493b1fa541c64e468513ae0#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:31,801 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/b7f1c5bbe6e64a74a13696588416c4ba is 1080, key is row0001/info:/1731897149700/Put/seqid=0 2024-11-18T02:32:31,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/e09663a7f6c8419bbf276d0e2a162878 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e09663a7f6c8419bbf276d0e2a162878 2024-11-18T02:32:31,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e09663a7f6c8419bbf276d0e2a162878, entries=14, sequenceid=64, filesize=19.6 K 2024-11-18T02:32:31,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for bd839c3b8493b1fa541c64e468513ae0 in 25ms, sequenceid=64, compaction requested=false 2024-11-18T02:32:31,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd839c3b8493b1fa541c64e468513ae0: 2024-11-18T02:32:31,809 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-18T02:32:31,809 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:31,809 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946 because midkey is the same as first or last row 2024-11-18T02:32:31,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741841_1017 (size=44978) 2024-11-18T02:32:31,811 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741841_1017 (size=44978) 2024-11-18T02:32:31,817 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/b7f1c5bbe6e64a74a13696588416c4ba as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba 2024-11-18T02:32:31,823 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bd839c3b8493b1fa541c64e468513ae0/info of bd839c3b8493b1fa541c64e468513ae0 into b7f1c5bbe6e64a74a13696588416c4ba(size=43.9 K), total size for store is 63.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T02:32:31,823 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bd839c3b8493b1fa541c64e468513ae0: 2024-11-18T02:32:31,823 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., storeName=bd839c3b8493b1fa541c64e468513ae0/info, priority=13, startTime=1731897151784; duration=0sec 2024-11-18T02:32:31,823 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-18T02:32:31,823 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:31,823 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba because midkey is the same as first or last row 2024-11-18T02:32:31,824 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-18T02:32:31,824 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:31,824 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba because midkey is the same as first or last row 2024-11-18T02:32:31,824 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-18T02:32:31,824 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:31,824 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba because midkey is the same as first or last row 2024-11-18T02:32:31,824 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:31,824 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bd839c3b8493b1fa541c64e468513ae0:info 2024-11-18T02:32:32,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:32,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:33,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:33,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:33,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:33,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd839c3b8493b1fa541c64e468513ae0 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-18T02:32:33,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/f7a398c539604dffbf945ba3a1fa9ea2 is 1080, key is row0052/info:/1731897151786/Put/seqid=0 2024-11-18T02:32:33,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741842_1018 (size=20064) 2024-11-18T02:32:33,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741842_1018 (size=20064) 2024-11-18T02:32:33,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/f7a398c539604dffbf945ba3a1fa9ea2 2024-11-18T02:32:33,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/f7a398c539604dffbf945ba3a1fa9ea2 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f7a398c539604dffbf945ba3a1fa9ea2 2024-11-18T02:32:33,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f7a398c539604dffbf945ba3a1fa9ea2, entries=14, sequenceid=82, filesize=19.6 K 2024-11-18T02:32:33,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for bd839c3b8493b1fa541c64e468513ae0 in 23ms, sequenceid=82, compaction requested=true 2024-11-18T02:32:33,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd839c3b8493b1fa541c64e468513ae0: 2024-11-18T02:32:33,835 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-18T02:32:33,835 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:33,835 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba because midkey is the same as first or last row 2024-11-18T02:32:33,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bd839c3b8493b1fa541c64e468513ae0:info, priority=-2147483648, current under compaction 
store size is 1 2024-11-18T02:32:33,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:33,835 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:32:33,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:33,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd839c3b8493b1fa541c64e468513ae0 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-18T02:32:33,837 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:32:33,837 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): bd839c3b8493b1fa541c64e468513ae0/info is initiating minor compaction (all files) 2024-11-18T02:32:33,837 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bd839c3b8493b1fa541c64e468513ae0/info in TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:33,837 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e09663a7f6c8419bbf276d0e2a162878, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f7a398c539604dffbf945ba3a1fa9ea2] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp, totalSize=83.1 K 2024-11-18T02:32:33,837 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7f1c5bbe6e64a74a13696588416c4ba, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731897149700 2024-11-18T02:32:33,838 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting e09663a7f6c8419bbf276d0e2a162878, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1731897151763 2024-11-18T02:32:33,838 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting f7a398c539604dffbf945ba3a1fa9ea2, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731897151786 2024-11-18T02:32:33,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/b10701c4f8f14baeb73c698b31966b4f is 1080, key is row0066/info:/1731897153813/Put/seqid=0 
2024-11-18T02:32:33,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741843_1019 (size=20064) 2024-11-18T02:32:33,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741843_1019 (size=20064) 2024-11-18T02:32:33,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/b10701c4f8f14baeb73c698b31966b4f 2024-11-18T02:32:33,851 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bd839c3b8493b1fa541c64e468513ae0#info#compaction#61 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:33,852 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/fab4b7fc30d44b50949044d67c644223 is 1080, key is row0001/info:/1731897149700/Put/seqid=0 2024-11-18T02:32:33,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/b10701c4f8f14baeb73c698b31966b4f as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b10701c4f8f14baeb73c698b31966b4f 2024-11-18T02:32:33,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741844_1020 (size=75378) 2024-11-18T02:32:33,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741844_1020 (size=75378) 2024-11-18T02:32:33,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b10701c4f8f14baeb73c698b31966b4f, entries=14, sequenceid=99, filesize=19.6 K 2024-11-18T02:32:33,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for bd839c3b8493b1fa541c64e468513ae0 in 25ms, sequenceid=99, compaction requested=false 2024-11-18T02:32:33,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd839c3b8493b1fa541c64e468513ae0: 2024-11-18T02:32:33,860 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-11-18T02:32:33,860 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:33,860 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba because midkey is the same as first or last row 2024-11-18T02:32:33,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:33,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd839c3b8493b1fa541c64e468513ae0 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-18T02:32:33,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/51813917216e40bab300d8caa2929d24 is 1080, key is row0080/info:/1731897153836/Put/seqid=0 2024-11-18T02:32:33,867 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/fab4b7fc30d44b50949044d67c644223 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223 2024-11-18T02:32:33,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741845_1021 (size=18987) 2024-11-18T02:32:33,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741845_1021 (size=18987) 2024-11-18T02:32:33,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/51813917216e40bab300d8caa2929d24 2024-11-18T02:32:33,874 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bd839c3b8493b1fa541c64e468513ae0/info of bd839c3b8493b1fa541c64e468513ae0 into fab4b7fc30d44b50949044d67c644223(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
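The short-compactions thread has just rewritten the three selected files (roughly 43.9 K + 19.6 K + 19.6 K, the 85106 bytes the exploring policy reported as "in ratio") into a single 73.6 K file. The following rough sketch shows the kind of ratio test implied by that selection, assuming the 1.2 compaction ratio reported in the store configuration later in this log; it is an illustration, not the ExploringCompactionPolicy source.

```java
import java.util.List;

/** Illustrative sketch of the ratio test used when picking a compaction window. */
public class CompactionRatioSketch {

    /** A candidate set is "in ratio" when every file is no larger than
     *  ratio * (combined size of the other files in the set). */
    static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three files selected above: ~43.9 K, ~19.6 K, ~19.6 K (85106 bytes total).
        List<Long> candidate = List.of(44_978L, 20_064L, 20_064L);
        System.out.println("in ratio (1.2): " + inRatio(candidate, 1.2)); // true
    }
}
```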
2024-11-18T02:32:33,874 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bd839c3b8493b1fa541c64e468513ae0: 2024-11-18T02:32:33,874 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., storeName=bd839c3b8493b1fa541c64e468513ae0/info, priority=13, startTime=1731897153835; duration=0sec 2024-11-18T02:32:33,874 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-18T02:32:33,874 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:33,874 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-18T02:32:33,874 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:33,874 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-18T02:32:33,874 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:33,875 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:33,875 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:33,875 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bd839c3b8493b1fa541c64e468513ae0:info 2024-11-18T02:32:33,876 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33897 {}] assignment.AssignmentManager(1355): Split request from c4730a2bacf8,40979,1731897138825, parent={ENCODED => bd839c3b8493b1fa541c64e468513ae0, NAME => 'TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-18T02:32:33,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/51813917216e40bab300d8caa2929d24 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/51813917216e40bab300d8caa2929d24 2024-11-18T02:32:33,882 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33897 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:33,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/51813917216e40bab300d8caa2929d24, entries=13, sequenceid=115, filesize=18.5 K 2024-11-18T02:32:33,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for bd839c3b8493b1fa541c64e468513ae0 in 24ms, sequenceid=115, compaction requested=true 2024-11-18T02:32:33,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd839c3b8493b1fa541c64e468513ae0: 2024-11-18T02:32:33,884 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-18T02:32:33,884 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:33,884 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-18T02:32:33,884 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:33,884 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.7 K, sizeToCheck=16.0 K 2024-11-18T02:32:33,884 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T02:32:33,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-18T02:32:33,886 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33897 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=03d4cf87ce3947d73844325f00579c2a, daughterB=954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:33,887 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=03d4cf87ce3947d73844325f00579c2a, daughterB=954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:33,887 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=03d4cf87ce3947d73844325f00579c2a, daughterB=954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:33,887 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=03d4cf87ce3947d73844325f00579c2a, daughterB=954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:33,888 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33897 {}] assignment.AssignmentManager(1355): Split request from c4730a2bacf8,40979,1731897138825, parent={ENCODED => 
bd839c3b8493b1fa541c64e468513ae0, NAME => 'TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-18T02:32:33,888 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33897 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:33,890 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33897 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=24418a5931f97c553be93e9d338ea48d, daughterB=178363f89a60ee18d3bdfd0ee83643d2 2024-11-18T02:32:33,890 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=24418a5931f97c553be93e9d338ea48d, daughterB=178363f89a60ee18d3bdfd0ee83643d2 held by pid=7 2024-11-18T02:32:33,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, UNASSIGN}] 2024-11-18T02:32:33,897 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-18T02:32:33,897 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, UNASSIGN 2024-11-18T02:32:33,897 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=24418a5931f97c553be93e9d338ea48d, daughterB=178363f89a60ee18d3bdfd0ee83643d2 2024-11-18T02:32:33,899 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=bd839c3b8493b1fa541c64e468513ae0, regionState=CLOSING, regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:33,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, UNASSIGN because future has completed 2024-11-18T02:32:33,901 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-18T02:32:33,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd839c3b8493b1fa541c64e468513ae0, 
server=c4730a2bacf8,40979,1731897138825}] 2024-11-18T02:32:34,058 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,058 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-18T02:32:34,059 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing bd839c3b8493b1fa541c64e468513ae0, disabling compactions & flushes 2024-11-18T02:32:34,059 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:34,059 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:34,059 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. after waiting 0 ms 2024-11-18T02:32:34,059 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:34,059 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing bd839c3b8493b1fa541c64e468513ae0 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-18T02:32:34,063 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/d37c27c6609e49c3bb75c165d04234a5 is 1080, key is row0093/info:/1731897153866/Put/seqid=0 2024-11-18T02:32:34,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741846_1022 (size=9270) 2024-11-18T02:32:34,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741846_1022 (size=9270) 2024-11-18T02:32:34,069 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/d37c27c6609e49c3bb75c165d04234a5 2024-11-18T02:32:34,074 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/.tmp/info/d37c27c6609e49c3bb75c165d04234a5 as 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/d37c27c6609e49c3bb75c165d04234a5 2024-11-18T02:32:34,078 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/d37c27c6609e49c3bb75c165d04234a5, entries=4, sequenceid=123, filesize=9.1 K 2024-11-18T02:32:34,079 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for bd839c3b8493b1fa541c64e468513ae0 in 20ms, sequenceid=123, compaction requested=true 2024-11-18T02:32:34,080 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e6b237b32730424b93aceb6f75b29711, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/ef6bc1a6b3ce4e4280b305802cc30345, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e09663a7f6c8419bbf276d0e2a162878, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f7a398c539604dffbf945ba3a1fa9ea2] to archive 2024-11-18T02:32:34,081 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
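The store closer then archives the compacted files by moving each one from the region's data directory to the matching path under archive/. The small sketch below reproduces that path rewrite as it appears in the HFileArchiver lines that follow; the helper name is made up, and only the data/ to archive/data/ prefix swap is taken from the log.

```java
/** Illustrative sketch of the data-dir to archive-dir rewrite visible in the
 *  HFileArchiver log lines below. Not HBase's implementation. */
public class ArchivePathSketch {

    /** Rewrites <root>/data/<suffix> to <root>/archive/data/<suffix>, keeping the
     *  table/region/family/file portion of the path intact. */
    static String toArchivePath(String storeFilePath, String rootDir) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("not under the data dir: " + storeFilePath);
        }
        return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264";
        String src = root + "/data/default/TestLogRolling-testLogRolling/"
                + "bd839c3b8493b1fa541c64e468513ae0/info/e6b237b32730424b93aceb6f75b29711";
        // Prints the same archive path the log reports for this file.
        System.out.println(toArchivePath(src, root));
    }
}
```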
2024-11-18T02:32:34,082 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e6b237b32730424b93aceb6f75b29711 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e6b237b32730424b93aceb6f75b29711 2024-11-18T02:32:34,083 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f060591dbe1b4837951413486650d946 2024-11-18T02:32:34,084 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b7f1c5bbe6e64a74a13696588416c4ba 2024-11-18T02:32:34,085 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/ef6bc1a6b3ce4e4280b305802cc30345 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/ef6bc1a6b3ce4e4280b305802cc30345 2024-11-18T02:32:34,087 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e09663a7f6c8419bbf276d0e2a162878 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/e09663a7f6c8419bbf276d0e2a162878 2024-11-18T02:32:34,088 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f7a398c539604dffbf945ba3a1fa9ea2 to 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/f7a398c539604dffbf945ba3a1fa9ea2 2024-11-18T02:32:34,093 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-18T02:32:34,093 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 2024-11-18T02:32:34,094 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for bd839c3b8493b1fa541c64e468513ae0: Waiting for close lock at 1731897154059Running coprocessor pre-close hooks at 1731897154059Disabling compacts and flushes for region at 1731897154059Disabling writes for close at 1731897154059Obtaining lock to block concurrent updates at 1731897154059Preparing flush snapshotting stores in bd839c3b8493b1fa541c64e468513ae0 at 1731897154059Finished memstore snapshotting TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., syncing WAL and waiting on mvcc, flushsize=dataSize=4304, getHeapSize=4848, getOffHeapSize=0, getCellsCount=4 at 1731897154059Flushing stores of TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. at 1731897154060 (+1 ms)Flushing bd839c3b8493b1fa541c64e468513ae0/info: creating writer at 1731897154060Flushing bd839c3b8493b1fa541c64e468513ae0/info: appending metadata at 1731897154063 (+3 ms)Flushing bd839c3b8493b1fa541c64e468513ae0/info: closing flushed file at 1731897154063Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@539ccea0: reopening flushed file at 1731897154073 (+10 ms)Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for bd839c3b8493b1fa541c64e468513ae0 in 20ms, sequenceid=123, compaction requested=true at 1731897154079 (+6 ms)Writing region close event to WAL at 1731897154090 (+11 ms)Running coprocessor post-close hooks at 1731897154093 (+3 ms)Closed at 1731897154093 2024-11-18T02:32:34,095 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,096 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=bd839c3b8493b1fa541c64e468513ae0, regionState=CLOSED 2024-11-18T02:32:34,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd839c3b8493b1fa541c64e468513ae0, server=c4730a2bacf8,40979,1731897138825 because future has completed 2024-11-18T02:32:34,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-18T02:32:34,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure bd839c3b8493b1fa541c64e468513ae0, server=c4730a2bacf8,40979,1731897138825 in 198 msec 2024-11-18T02:32:34,103 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-18T02:32:34,103 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd839c3b8493b1fa541c64e468513ae0, UNASSIGN in 207 msec 2024-11-18T02:32:34,111 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:34,115 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=bd839c3b8493b1fa541c64e468513ae0, threads=4 2024-11-18T02:32:34,118 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/51813917216e40bab300d8caa2929d24 for region: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,118 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b10701c4f8f14baeb73c698b31966b4f for region: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,118 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/d37c27c6609e49c3bb75c165d04234a5 for region: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,118 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223 for region: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,128 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/d37c27c6609e49c3bb75c165d04234a5, top=true 2024-11-18T02:32:34,128 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b10701c4f8f14baeb73c698b31966b4f, top=true 2024-11-18T02:32:34,130 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/51813917216e40bab300d8caa2929d24, top=true 2024-11-18T02:32:34,134 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-d37c27c6609e49c3bb75c165d04234a5 for child: 954ac799c33efc422cbb0eeb28e56642, parent: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,134 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-b10701c4f8f14baeb73c698b31966b4f for child: 954ac799c33efc422cbb0eeb28e56642, parent: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741847_1023 (size=27) 2024-11-18T02:32:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741847_1023 (size=27) 2024-11-18T02:32:34,134 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/d37c27c6609e49c3bb75c165d04234a5 for region: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,134 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/b10701c4f8f14baeb73c698b31966b4f for region: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,137 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-51813917216e40bab300d8caa2929d24 for child: 954ac799c33efc422cbb0eeb28e56642, parent: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,137 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/51813917216e40bab300d8caa2929d24 for region: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741848_1024 (size=27) 2024-11-18T02:32:34,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741848_1024 (size=27) 2024-11-18T02:32:34,146 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223 for region: bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:32:34,148 DEBUG [PEWorker-1 
{}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region bd839c3b8493b1fa541c64e468513ae0 Daughter A: [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0] storefiles, Daughter B: [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-51813917216e40bab300d8caa2929d24, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-b10701c4f8f14baeb73c698b31966b4f, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-d37c27c6609e49c3bb75c165d04234a5, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0] storefiles. 2024-11-18T02:32:34,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741849_1025 (size=71) 2024-11-18T02:32:34,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741849_1025 (size=71) 2024-11-18T02:32:34,157 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:34,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741850_1026 (size=71) 2024-11-18T02:32:34,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741850_1026 (size=71) 2024-11-18T02:32:34,170 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:34,181 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-18T02:32:34,183 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-18T02:32:34,186 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731897154186"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731897154186"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731897154186"}]},"ts":"1731897154186"} 2024-11-18T02:32:34,186 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731897154186"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731897154186"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731897154186"}]},"ts":"1731897154186"} 2024-11-18T02:32:34,187 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731897154186"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731897154186"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731897154186"}]},"ts":"1731897154186"} 2024-11-18T02:32:34,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=03d4cf87ce3947d73844325f00579c2a, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=954ac799c33efc422cbb0eeb28e56642, ASSIGN}] 2024-11-18T02:32:34,207 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=954ac799c33efc422cbb0eeb28e56642, ASSIGN 2024-11-18T02:32:34,207 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=03d4cf87ce3947d73844325f00579c2a, ASSIGN 2024-11-18T02:32:34,208 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=954ac799c33efc422cbb0eeb28e56642, ASSIGN; state=SPLITTING_NEW, location=c4730a2bacf8,40979,1731897138825; forceNewPlan=false, retain=false 2024-11-18T02:32:34,208 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=03d4cf87ce3947d73844325f00579c2a, ASSIGN; state=SPLITTING_NEW, location=c4730a2bacf8,40979,1731897138825; forceNewPlan=false, retain=false 2024-11-18T02:32:34,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null 
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:34,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:34,359 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=03d4cf87ce3947d73844325f00579c2a, regionState=OPENING, regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:34,359 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=954ac799c33efc422cbb0eeb28e56642, regionState=OPENING, regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:34,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=954ac799c33efc422cbb0eeb28e56642, ASSIGN because future has completed 2024-11-18T02:32:34,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 954ac799c33efc422cbb0eeb28e56642, server=c4730a2bacf8,40979,1731897138825}] 2024-11-18T02:32:34,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=03d4cf87ce3947d73844325f00579c2a, ASSIGN because future has completed 2024-11-18T02:32:34,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 03d4cf87ce3947d73844325f00579c2a, server=c4730a2bacf8,40979,1731897138825}] 2024-11-18T02:32:34,517 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 
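The two WARNs above come from a reflective call: RecoverLeaseFSUtils invokes isFileClosed through java.lang.reflect.Method, so the real cause ("Filesystem closed") surfaces wrapped in an InvocationTargetException at the top of the trace. The self-contained sketch below reproduces that wrapping behaviour with a hypothetical FakeFileSystem rather than the Hadoop client.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

/** Illustrative sketch of why the WARN reports InvocationTargetException: a method
 *  invoked via reflection has its real failure nested as the cause. Hypothetical
 *  types only, not HBase or Hadoop code. */
public class ReflectiveCallSketch {

    /** Stand-in for a filesystem whose isFileClosed() is looked up reflectively. */
    public static class FakeFileSystem {
        public boolean isFileClosed(String path) throws java.io.IOException {
            throw new java.io.IOException("Filesystem closed"); // what the DFS client reported
        }
    }

    public static void main(String[] args) throws Exception {
        FakeFileSystem fs = new FakeFileSystem();
        Method m = FakeFileSystem.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The reflection layer is what appears at the top of the stack trace;
            // the underlying I/O failure is the nested cause.
            System.out.println("wrapped cause: " + e.getCause().getMessage());
        }
    }
}
```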
2024-11-18T02:32:34,517 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => 03d4cf87ce3947d73844325f00579c2a, NAME => 'TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-18T02:32:34,518 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,518 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:32:34,518 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,518 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,519 INFO [StoreOpener-03d4cf87ce3947d73844325f00579c2a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,520 INFO [StoreOpener-03d4cf87ce3947d73844325f00579c2a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 03d4cf87ce3947d73844325f00579c2a columnFamilyName info 2024-11-18T02:32:34,520 DEBUG [StoreOpener-03d4cf87ce3947d73844325f00579c2a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:34,530 DEBUG [StoreOpener-03d4cf87ce3947d73844325f00579c2a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0->hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223-bottom 2024-11-18T02:32:34,530 INFO [StoreOpener-03d4cf87ce3947d73844325f00579c2a-1 {}] regionserver.HStore(327): Store=03d4cf87ce3947d73844325f00579c2a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:32:34,530 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,531 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,532 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,532 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,532 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,534 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,535 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened 03d4cf87ce3947d73844325f00579c2a; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874485, jitterRate=0.11196620762348175}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T02:32:34,535 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:32:34,535 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for 03d4cf87ce3947d73844325f00579c2a: Running coprocessor pre-open hook at 1731897154518Writing region info on filesystem at 1731897154518Initializing all the Stores at 1731897154519 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897154519Cleaning up temporary data from old regions at 1731897154532 (+13 ms)Running coprocessor post-open hooks at 1731897154535 (+3 ms)Region opened successfully at 1731897154535 2024-11-18T02:32:34,536 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a., pid=14, masterSystemTime=1731897154514 2024-11-18T02:32:34,536 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store 
03d4cf87ce3947d73844325f00579c2a:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T02:32:34,536 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:34,536 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-18T02:32:34,537 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 2024-11-18T02:32:34,537 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): 03d4cf87ce3947d73844325f00579c2a/info is initiating minor compaction (all files) 2024-11-18T02:32:34,537 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 03d4cf87ce3947d73844325f00579c2a/info in TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 2024-11-18T02:32:34,537 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0->hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223-bottom] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/.tmp, totalSize=73.6 K 2024-11-18T02:32:34,538 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731897149700 2024-11-18T02:32:34,538 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 2024-11-18T02:32:34,538 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 2024-11-18T02:32:34,538 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 
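The daughter regions reuse the parent's data without copying it: daughter A opened a reference file named <hfile>.<parent-encoded-region> that reads the bottom half, while daughter B received HFileLink-style files named <table>=<parent-encoded-region>-<hfile>. The sketch below just rebuilds those two naming patterns as they appear in the file names logged above; it is derived from this log, not from the HBase source.

```java
/** Illustrative sketch of the daughter-region store file names seen in this log:
 *  a reference file for the bottom half and an HFileLink-style name for files
 *  carried over whole to the top daughter. */
public class SplitFileNameSketch {

    static String referenceFileName(String hfileName, String parentEncodedRegion) {
        // e.g. fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0
        return hfileName + "." + parentEncodedRegion;
    }

    static String hfileLinkName(String tableName, String parentEncodedRegion, String hfileName) {
        // e.g. TestLogRolling-testLogRolling=bd839...-51813917216e40bab300d8caa2929d24
        return tableName + "=" + parentEncodedRegion + "-" + hfileName;
    }

    public static void main(String[] args) {
        String parent = "bd839c3b8493b1fa541c64e468513ae0";
        System.out.println(referenceFileName("fab4b7fc30d44b50949044d67c644223", parent));
        System.out.println(hfileLinkName("TestLogRolling-testLogRolling", parent,
                "51813917216e40bab300d8caa2929d24"));
    }
}
```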
2024-11-18T02:32:34,538 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 954ac799c33efc422cbb0eeb28e56642, NAME => 'TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-18T02:32:34,539 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,539 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:32:34,539 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,539 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,539 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=03d4cf87ce3947d73844325f00579c2a, regionState=OPEN, openSeqNum=127, regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:34,540 INFO [StoreOpener-954ac799c33efc422cbb0eeb28e56642-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,541 INFO [StoreOpener-954ac799c33efc422cbb0eeb28e56642-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 954ac799c33efc422cbb0eeb28e56642 columnFamilyName info 2024-11-18T02:32:34,541 DEBUG [StoreOpener-954ac799c33efc422cbb0eeb28e56642-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:32:34,541 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-18T02:32:34,541 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
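The CompactionConfiguration entry above carries the knobs that drive the selections seen in this log (ratio 1.2, minCompactSize 128 MB, 3-10 files per compaction, ExploringCompactionPolicy). Below is a simplified sketch of the usual ratio test, not the actual ExploringCompactionPolicy implementation, under the assumption that a file qualifies when it is no larger than ratio times the rest of the candidate set, or is below minCompactSize. Since every store file in this test is far below 128 MB, all of them qualify, which matches the "(all files)" minor compactions reported here.

    // Editor's sketch of a ratio-based eligibility check (assumed rule, hedged above).
    import java.util.List;

    public class RatioRuleSketch {
        /** A file fits the selection if size <= ratio * (sum of the other candidates),
         *  or if it is below minCompactSize (tiny files always qualify). */
        static boolean fitsSelection(long fileSize, List<Long> otherSizes,
                                     double ratio, long minCompactSize) {
            if (fileSize < minCompactSize) {
                return true;
            }
            long sumOthers = otherSizes.stream().mapToLong(Long::longValue).sum();
            return fileSize <= ratio * sumOthers;
        }

        public static void main(String[] args) {
            // Sizes in bytes, loosely modeled on the store files in this log.
            long big = 75_000L, mid = 20_000L, small = 9_000L;
            // All well under the 128 MB minCompactSize, so everything is eligible.
            System.out.println(fitsSelection(big, List.of(mid, small), 1.2, 128L << 20));
        }
    }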
2024-11-18T02:32:34,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-18T02:32:34,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 03d4cf87ce3947d73844325f00579c2a, server=c4730a2bacf8,40979,1731897138825 because future has completed 2024-11-18T02:32:34,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-11-18T02:32:34,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 03d4cf87ce3947d73844325f00579c2a, server=c4730a2bacf8,40979,1731897138825 in 180 msec 2024-11-18T02:32:34,547 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=03d4cf87ce3947d73844325f00579c2a, ASSIGN in 340 msec 2024-11-18T02:32:34,553 DEBUG [StoreOpener-954ac799c33efc422cbb0eeb28e56642-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-51813917216e40bab300d8caa2929d24 2024-11-18T02:32:34,558 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 03d4cf87ce3947d73844325f00579c2a#info#compaction#64 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:34,558 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/.tmp/info/9b3cb0fe2f7f408298b4bd0f3df20a15 is 1080, key is row0001/info:/1731897149700/Put/seqid=0 2024-11-18T02:32:34,558 DEBUG [StoreOpener-954ac799c33efc422cbb0eeb28e56642-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-b10701c4f8f14baeb73c698b31966b4f 2024-11-18T02:32:34,563 DEBUG [StoreOpener-954ac799c33efc422cbb0eeb28e56642-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-d37c27c6609e49c3bb75c165d04234a5 2024-11-18T02:32:34,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/info/f43262afccbf4054a8780c0496eafb97 is 193, key is TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642./info:regioninfo/1731897154359/Put/seqid=0 2024-11-18T02:32:34,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741851_1027 (size=70862) 
2024-11-18T02:32:34,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741851_1027 (size=70862) 2024-11-18T02:32:34,567 DEBUG [StoreOpener-954ac799c33efc422cbb0eeb28e56642-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0->hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223-top 2024-11-18T02:32:34,568 INFO [StoreOpener-954ac799c33efc422cbb0eeb28e56642-1 {}] regionserver.HStore(327): Store=954ac799c33efc422cbb0eeb28e56642/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:32:34,568 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,569 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741852_1028 (size=9847) 2024-11-18T02:32:34,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741852_1028 (size=9847) 2024-11-18T02:32:34,570 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,571 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,571 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/info/f43262afccbf4054a8780c0496eafb97 2024-11-18T02:32:34,573 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,573 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/.tmp/info/9b3cb0fe2f7f408298b4bd0f3df20a15 as 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/info/9b3cb0fe2f7f408298b4bd0f3df20a15 2024-11-18T02:32:34,574 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 954ac799c33efc422cbb0eeb28e56642; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884207, jitterRate=0.1243276447057724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T02:32:34,574 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:34,574 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 954ac799c33efc422cbb0eeb28e56642: Running coprocessor pre-open hook at 1731897154539Writing region info on filesystem at 1731897154539Initializing all the Stores at 1731897154540 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897154540Cleaning up temporary data from old regions at 1731897154571 (+31 ms)Running coprocessor post-open hooks at 1731897154574 (+3 ms)Region opened successfully at 1731897154574 2024-11-18T02:32:34,575 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., pid=13, masterSystemTime=1731897154514 2024-11-18T02:32:34,575 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 954ac799c33efc422cbb0eeb28e56642:info, priority=-2147483648, current under compaction store size is 2 2024-11-18T02:32:34,575 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:34,575 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-18T02:32:34,577 INFO [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:32:34,577 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.HStore(1541): 954ac799c33efc422cbb0eeb28e56642/info is initiating minor compaction (all files) 2024-11-18T02:32:34,577 INFO [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 954ac799c33efc422cbb0eeb28e56642/info in TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 
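The two Opened entries above print a desiredMaxFileSize alongside a jitterRate (874485 with jitterRate=0.11196620762348175, and 884207 with jitterRate=0.1243276447057724). Both values are consistent with a common base of 786432 bytes (768 KB) scaled multiplicatively by (1 + jitterRate); the base value and the multiplicative form are assumptions inferred from the logged numbers, not read from the test configuration:

    // Assumption: a 786432-byte (768 KB) base with multiplicative jitter; both
    // logged desiredMaxFileSize values fall out of the same formula.
    public class JitterCheck {
        public static void main(String[] args) {
            long base = 786_432L; // assumed base max store file size for this test
            double[] jitterRates = {0.11196620762348175, 0.1243276447057724};
            for (double jitter : jitterRates) {
                System.out.println((long) (base * (1 + jitter))); // 874485, then 884207
            }
        }
    }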
2024-11-18T02:32:34,578 INFO [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0->hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223-top, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-b10701c4f8f14baeb73c698b31966b4f, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-51813917216e40bab300d8caa2929d24, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-d37c27c6609e49c3bb75c165d04234a5] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp, totalSize=120.8 K 2024-11-18T02:32:34,578 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] compactions.Compactor(225): Compacting fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731897149700 2024-11-18T02:32:34,579 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-b10701c4f8f14baeb73c698b31966b4f, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731897153813 2024-11-18T02:32:34,579 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-51813917216e40bab300d8caa2929d24, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731897153836 2024-11-18T02:32:34,580 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-d37c27c6609e49c3bb75c165d04234a5, keycount=4, bloomtype=ROW, size=9.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731897153866 2024-11-18T02:32:34,580 DEBUG [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:32:34,580 INFO [RS_OPEN_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 
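The four Compacting entries above account exactly for the reported input: 73.6 K + 19.6 K + 18.5 K + 9.1 K = 120.8 K. The first input is the "-top" half-reference to the parent region's file, which only contributes keys from the split point upward, which is presumably why the compacted output reported a few entries later (42.1 K) is much smaller than the 120.8 K input. A quick check of the sum, using tenths of a kilobyte so the arithmetic stays exact:

    // Sums the per-file sizes listed by Compactor(225) above, in tenths of a KB:
    // 1208 tenths = 120.8 K, matching the reported totalSize=120.8 K.
    public class CompactionInputTotal {
        public static void main(String[] args) {
            int[] tenthsOfKb = {736, 196, 185, 91}; // 73.6 K, 19.6 K, 18.5 K, 9.1 K
            int total = 0;
            for (int t : tenthsOfKb) {
                total += t;
            }
            System.out.println(total / 10.0 + " K"); // 120.8 K
        }
    }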
2024-11-18T02:32:34,580 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 03d4cf87ce3947d73844325f00579c2a/info of 03d4cf87ce3947d73844325f00579c2a into 9b3cb0fe2f7f408298b4bd0f3df20a15(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T02:32:34,580 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 03d4cf87ce3947d73844325f00579c2a: 2024-11-18T02:32:34,580 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a., storeName=03d4cf87ce3947d73844325f00579c2a/info, priority=15, startTime=1731897154536; duration=0sec 2024-11-18T02:32:34,581 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:34,581 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 03d4cf87ce3947d73844325f00579c2a:info 2024-11-18T02:32:34,581 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=954ac799c33efc422cbb0eeb28e56642, regionState=OPEN, openSeqNum=127, regionLocation=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:34,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 954ac799c33efc422cbb0eeb28e56642, server=c4730a2bacf8,40979,1731897138825 because future has completed 2024-11-18T02:32:34,588 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-18T02:32:34,588 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure 954ac799c33efc422cbb0eeb28e56642, server=c4730a2bacf8,40979,1731897138825 in 223 msec 2024-11-18T02:32:34,591 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=7 2024-11-18T02:32:34,591 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=954ac799c33efc422cbb0eeb28e56642, ASSIGN in 382 msec 2024-11-18T02:32:34,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=03d4cf87ce3947d73844325f00579c2a, daughterB=954ac799c33efc422cbb0eeb28e56642 in 709 msec 2024-11-18T02:32:34,593 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=24418a5931f97c553be93e9d338ea48d, daughterB=178363f89a60ee18d3bdfd0ee83643d2 2024-11-18T02:32:34,593 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, 
parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=24418a5931f97c553be93e9d338ea48d, daughterB=178363f89a60ee18d3bdfd0ee83643d2 2024-11-18T02:32:34,593 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=24418a5931f97c553be93e9d338ea48d, daughterB=178363f89a60ee18d3bdfd0ee83643d2 2024-11-18T02:32:34,594 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => bd839c3b8493b1fa541c64e468513ae0, NAME => 'TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-18T02:32:34,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd839c3b8493b1fa541c64e468513ae0, daughterA=24418a5931f97c553be93e9d338ea48d, daughterB=178363f89a60ee18d3bdfd0ee83643d2 in 706 msec 2024-11-18T02:32:34,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/ns/0caf9a3494af45df862078e062cde907 is 43, key is default/ns:d/1731897139612/Put/seqid=0 2024-11-18T02:32:34,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741853_1029 (size=5153) 2024-11-18T02:32:34,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741853_1029 (size=5153) 2024-11-18T02:32:34,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/ns/0caf9a3494af45df862078e062cde907 2024-11-18T02:32:34,611 INFO [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 954ac799c33efc422cbb0eeb28e56642#info#compaction#67 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:34,612 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/d5ae6d66d067434c9aa12cf97ed7b75a is 1080, key is row0062/info:/1731897151805/Put/seqid=0 2024-11-18T02:32:34,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741854_1030 (size=43081) 2024-11-18T02:32:34,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741854_1030 (size=43081) 2024-11-18T02:32:34,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/table/c7997d6617624f35b651fca5537549ea is 65, key is TestLogRolling-testLogRolling/table:state/1731897140013/Put/seqid=0 2024-11-18T02:32:34,625 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/d5ae6d66d067434c9aa12cf97ed7b75a as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d5ae6d66d067434c9aa12cf97ed7b75a 2024-11-18T02:32:34,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741855_1031 (size=5340) 2024-11-18T02:32:34,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741855_1031 (size=5340) 2024-11-18T02:32:34,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/table/c7997d6617624f35b651fca5537549ea 2024-11-18T02:32:34,631 INFO [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 954ac799c33efc422cbb0eeb28e56642/info of 954ac799c33efc422cbb0eeb28e56642 into d5ae6d66d067434c9aa12cf97ed7b75a(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
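Every addStoredBlock entry in this section appears twice, once from 127.0.0.1:44657 and once from 127.0.0.1:41201, suggesting the mini-cluster is running two DataNodes with a replication factor of 2. A small, self-contained sketch (sample lines copied from the log above; not part of the test itself) that tallies replica reports per block id:

    // Editor's sketch: count how many DataNodes report each block in
    // "addStoredBlock" lines like the ones in this section.
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class BlockReplicaCount {
        public static void main(String[] args) {
            String[] lines = {
                "BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741854_1030 (size=43081)",
                "BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741854_1030 (size=43081)",
                "BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741855_1031 (size=5340)",
                "BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741855_1031 (size=5340)"
            };
            Pattern p = Pattern.compile("addStoredBlock: \\S+ is added to (blk_\\S+)");
            Map<String, Integer> replicas = new LinkedHashMap<>();
            for (String line : lines) {
                Matcher m = p.matcher(line);
                if (m.find()) {
                    replicas.merge(m.group(1), 1, Integer::sum);
                }
            }
            // Prints "... -> 2 replica(s)" for each block, matching the paired log entries.
            replicas.forEach((blk, n) -> System.out.println(blk + " -> " + n + " replica(s)"));
        }
    }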
2024-11-18T02:32:34,631 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:34,631 INFO [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., storeName=954ac799c33efc422cbb0eeb28e56642/info, priority=12, startTime=1731897154575; duration=0sec 2024-11-18T02:32:34,631 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:34,631 DEBUG [RS:0;c4730a2bacf8:40979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 954ac799c33efc422cbb0eeb28e56642:info 2024-11-18T02:32:34,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/info/f43262afccbf4054a8780c0496eafb97 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/info/f43262afccbf4054a8780c0496eafb97 2024-11-18T02:32:34,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/info/f43262afccbf4054a8780c0496eafb97, entries=30, sequenceid=17, filesize=9.6 K 2024-11-18T02:32:34,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/ns/0caf9a3494af45df862078e062cde907 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/ns/0caf9a3494af45df862078e062cde907 2024-11-18T02:32:34,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/ns/0caf9a3494af45df862078e062cde907, entries=2, sequenceid=17, filesize=5.0 K 2024-11-18T02:32:34,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/table/c7997d6617624f35b651fca5537549ea as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/table/c7997d6617624f35b651fca5537549ea 2024-11-18T02:32:34,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/table/c7997d6617624f35b651fca5537549ea, entries=2, sequenceid=17, filesize=5.2 K 2024-11-18T02:32:34,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 107ms, sequenceid=17, compaction requested=false 2024-11-18T02:32:34,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T02:32:35,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:35,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:35,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:39902 deadline: 1731897165873, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. is not online on c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:35,898 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., hostname=c4730a2bacf8,40979,1731897138825, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., hostname=c4730a2bacf8,40979,1731897138825, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. is not online on c4730a2bacf8,40979,1731897138825 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T02:32:35,899 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., hostname=c4730a2bacf8,40979,1731897138825, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0. 
is not online on c4730a2bacf8,40979,1731897138825 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T02:32:35,899 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731897139654.bd839c3b8493b1fa541c64e468513ae0., hostname=c4730a2bacf8,40979,1731897138825, seqNum=2 from cache 2024-11-18T02:32:36,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:36,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:37,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:37,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:38,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:38,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:39,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,094 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:39,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:39,637 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T02:32:39,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:39,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T02:32:40,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:40,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:41,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:41,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:42,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:42,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:43,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:43,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:44,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:44,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:45,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:45,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:45,969 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., hostname=c4730a2bacf8,40979,1731897138825, seqNum=127] 2024-11-18T02:32:45,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:45,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T02:32:45,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/4a6fbbb0fa134954bbe0d6198f95bff7 is 1080, key is row0097/info:/1731897165970/Put/seqid=0 2024-11-18T02:32:45,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741856_1032 (size=12516) 2024-11-18T02:32:45,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741856_1032 (size=12516) 2024-11-18T02:32:45,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/4a6fbbb0fa134954bbe0d6198f95bff7 2024-11-18T02:32:45,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/4a6fbbb0fa134954bbe0d6198f95bff7 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4a6fbbb0fa134954bbe0d6198f95bff7 2024-11-18T02:32:46,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4a6fbbb0fa134954bbe0d6198f95bff7, entries=7, sequenceid=137, filesize=12.2 K 2024-11-18T02:32:46,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 954ac799c33efc422cbb0eeb28e56642 in 25ms, sequenceid=137, compaction requested=false 2024-11-18T02:32:46,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:46,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-18T02:32:46,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/b470d629d30f4baf999342387d118601 is 1080, key is row0104/info:/1731897165981/Put/seqid=0 2024-11-18T02:32:46,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741857_1033 (size=23316) 2024-11-18T02:32:46,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741857_1033 (size=23316) 2024-11-18T02:32:46,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/b470d629d30f4baf999342387d118601 2024-11-18T02:32:46,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/b470d629d30f4baf999342387d118601 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b470d629d30f4baf999342387d118601 2024-11-18T02:32:46,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b470d629d30f4baf999342387d118601, entries=17, sequenceid=157, filesize=22.8 K 2024-11-18T02:32:46,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=8.41 KB/8608 for 954ac799c33efc422cbb0eeb28e56642 in 21ms, sequenceid=157, compaction requested=true 2024-11-18T02:32:46,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:46,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 954ac799c33efc422cbb0eeb28e56642:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T02:32:46,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:46,027 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:32:46,028 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78913 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:32:46,028 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): 954ac799c33efc422cbb0eeb28e56642/info is initiating minor compaction (all files) 2024-11-18T02:32:46,028 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 954ac799c33efc422cbb0eeb28e56642/info in 
TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:32:46,028 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d5ae6d66d067434c9aa12cf97ed7b75a, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4a6fbbb0fa134954bbe0d6198f95bff7, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b470d629d30f4baf999342387d118601] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp, totalSize=77.1 K 2024-11-18T02:32:46,029 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5ae6d66d067434c9aa12cf97ed7b75a, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731897151805 2024-11-18T02:32:46,029 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a6fbbb0fa134954bbe0d6198f95bff7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731897165970 2024-11-18T02:32:46,029 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting b470d629d30f4baf999342387d118601, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731897165981 2024-11-18T02:32:46,039 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 954ac799c33efc422cbb0eeb28e56642#info#compaction#71 average throughput is 60.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:46,039 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/d13c6dde521d434095efe8cc461fd048 is 1080, key is row0062/info:/1731897151805/Put/seqid=0 2024-11-18T02:32:46,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741858_1034 (size=69123) 2024-11-18T02:32:46,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741858_1034 (size=69123) 2024-11-18T02:32:46,049 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/d13c6dde521d434095efe8cc461fd048 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d13c6dde521d434095efe8cc461fd048 2024-11-18T02:32:46,056 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 954ac799c33efc422cbb0eeb28e56642/info of 954ac799c33efc422cbb0eeb28e56642 into d13c6dde521d434095efe8cc461fd048(size=67.5 K), total size for store is 67.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T02:32:46,056 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:46,056 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., storeName=954ac799c33efc422cbb0eeb28e56642/info, priority=13, startTime=1731897166027; duration=0sec 2024-11-18T02:32:46,056 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:46,056 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 954ac799c33efc422cbb0eeb28e56642:info 2024-11-18T02:32:46,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:46,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:47,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:47,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:48,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:48,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-18T02:32:48,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/900a0bbc69534c1d98f585b56af20837 is 1080, key is row0121/info:/1731897166007/Put/seqid=0 2024-11-18T02:32:48,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741859_1035 (size=14672) 2024-11-18T02:32:48,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741859_1035 (size=14672) 2024-11-18T02:32:48,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/900a0bbc69534c1d98f585b56af20837 2024-11-18T02:32:48,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/900a0bbc69534c1d98f585b56af20837 as 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/900a0bbc69534c1d98f585b56af20837 2024-11-18T02:32:48,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/900a0bbc69534c1d98f585b56af20837, entries=9, sequenceid=170, filesize=14.3 K 2024-11-18T02:32:48,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=15.76 KB/16140 for 954ac799c33efc422cbb0eeb28e56642 in 22ms, sequenceid=170, compaction requested=false 2024-11-18T02:32:48,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:48,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:48,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-18T02:32:48,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/ef3428a2b98344e2bc0e42e2fd5edccc is 1080, key is row0130/info:/1731897168023/Put/seqid=0 2024-11-18T02:32:48,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741860_1036 (size=22238) 2024-11-18T02:32:48,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741860_1036 (size=22238) 2024-11-18T02:32:48,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/ef3428a2b98344e2bc0e42e2fd5edccc 2024-11-18T02:32:48,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/ef3428a2b98344e2bc0e42e2fd5edccc as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ef3428a2b98344e2bc0e42e2fd5edccc 2024-11-18T02:32:48,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ef3428a2b98344e2bc0e42e2fd5edccc, entries=16, sequenceid=189, filesize=21.7 K 2024-11-18T02:32:48,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=12.61 KB/12912 for 954ac799c33efc422cbb0eeb28e56642 in 19ms, sequenceid=189, compaction requested=true 2024-11-18T02:32:48,066 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:48,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 954ac799c33efc422cbb0eeb28e56642:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T02:32:48,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:48,066 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:32:48,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:48,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-18T02:32:48,067 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 106033 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:32:48,067 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): 954ac799c33efc422cbb0eeb28e56642/info is initiating minor compaction (all files) 2024-11-18T02:32:48,067 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 954ac799c33efc422cbb0eeb28e56642/info in TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:32:48,067 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d13c6dde521d434095efe8cc461fd048, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/900a0bbc69534c1d98f585b56af20837, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ef3428a2b98344e2bc0e42e2fd5edccc] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp, totalSize=103.5 K 2024-11-18T02:32:48,068 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting d13c6dde521d434095efe8cc461fd048, keycount=59, bloomtype=ROW, size=67.5 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731897151805 2024-11-18T02:32:48,068 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 900a0bbc69534c1d98f585b56af20837, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731897166007 2024-11-18T02:32:48,068 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting ef3428a2b98344e2bc0e42e2fd5edccc, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731897168023 2024-11-18T02:32:48,071 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/b904586ceffe4d61a9e8c1c9806f6999 is 1080, key is row0146/info:/1731897168046/Put/seqid=0 2024-11-18T02:32:48,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741861_1037 (size=19000) 2024-11-18T02:32:48,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741861_1037 (size=19000) 2024-11-18T02:32:48,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/b904586ceffe4d61a9e8c1c9806f6999 2024-11-18T02:32:48,080 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 954ac799c33efc422cbb0eeb28e56642#info#compaction#75 average throughput is 43.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:48,080 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/db275e4b237c447992f9f95e741c13f4 is 1080, key is row0062/info:/1731897151805/Put/seqid=0 2024-11-18T02:32:48,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/b904586ceffe4d61a9e8c1c9806f6999 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b904586ceffe4d61a9e8c1c9806f6999 2024-11-18T02:32:48,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b904586ceffe4d61a9e8c1c9806f6999, entries=13, sequenceid=205, filesize=18.6 K 2024-11-18T02:32:48,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=2.10 KB/2152 for 954ac799c33efc422cbb0eeb28e56642 in 20ms, sequenceid=205, compaction requested=false 2024-11-18T02:32:48,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:48,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741862_1038 (size=96252) 2024-11-18T02:32:48,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741862_1038 (size=96252) 2024-11-18T02:32:48,095 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/db275e4b237c447992f9f95e741c13f4 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/db275e4b237c447992f9f95e741c13f4 2024-11-18T02:32:48,100 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 954ac799c33efc422cbb0eeb28e56642/info of 954ac799c33efc422cbb0eeb28e56642 into db275e4b237c447992f9f95e741c13f4(size=94.0 K), total size for store is 112.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T02:32:48,100 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:48,100 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., storeName=954ac799c33efc422cbb0eeb28e56642/info, priority=13, startTime=1731897168066; duration=0sec 2024-11-18T02:32:48,100 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:48,100 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 954ac799c33efc422cbb0eeb28e56642:info 2024-11-18T02:32:48,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:48,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:48,769 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T02:32:49,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:49,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:50,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:50,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T02:32:50,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/6c1a4ef0d73643a5b8601d6177a17a2b is 1080, key is row0159/info:/1731897168068/Put/seqid=0 2024-11-18T02:32:50,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741863_1039 (size=12516) 2024-11-18T02:32:50,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741863_1039 (size=12516) 2024-11-18T02:32:50,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/6c1a4ef0d73643a5b8601d6177a17a2b 2024-11-18T02:32:50,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/6c1a4ef0d73643a5b8601d6177a17a2b as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/6c1a4ef0d73643a5b8601d6177a17a2b 2024-11-18T02:32:50,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/6c1a4ef0d73643a5b8601d6177a17a2b, entries=7, sequenceid=216, filesize=12.2 K 2024-11-18T02:32:50,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 954ac799c33efc422cbb0eeb28e56642 in 47ms, sequenceid=216, compaction requested=true 2024-11-18T02:32:50,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:50,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 954ac799c33efc422cbb0eeb28e56642:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T02:32:50,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:50,127 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:32:50,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:50,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-18T02:32:50,128 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127768 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:32:50,128 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): 954ac799c33efc422cbb0eeb28e56642/info is initiating minor compaction (all files) 2024-11-18T02:32:50,128 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 954ac799c33efc422cbb0eeb28e56642/info in TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:32:50,128 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/db275e4b237c447992f9f95e741c13f4, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b904586ceffe4d61a9e8c1c9806f6999, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/6c1a4ef0d73643a5b8601d6177a17a2b] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp, totalSize=124.8 K 2024-11-18T02:32:50,129 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting db275e4b237c447992f9f95e741c13f4, keycount=84, bloomtype=ROW, size=94.0 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731897151805 2024-11-18T02:32:50,129 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting b904586ceffe4d61a9e8c1c9806f6999, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731897168046 2024-11-18T02:32:50,129 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c1a4ef0d73643a5b8601d6177a17a2b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731897168068 2024-11-18T02:32:50,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/4c56ec95e65c42ea9947fab7e88aa6f7 is 1080, key is row0166/info:/1731897170080/Put/seqid=0 2024-11-18T02:32:50,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to 
blk_1073741864_1040 (size=24394) 2024-11-18T02:32:50,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741864_1040 (size=24394) 2024-11-18T02:32:50,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/4c56ec95e65c42ea9947fab7e88aa6f7 2024-11-18T02:32:50,147 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 954ac799c33efc422cbb0eeb28e56642#info#compaction#78 average throughput is 35.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:50,147 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/e119351bf48c4b1a95ff8463d75af78f is 1080, key is row0062/info:/1731897151805/Put/seqid=0 2024-11-18T02:32:50,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/4c56ec95e65c42ea9947fab7e88aa6f7 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4c56ec95e65c42ea9947fab7e88aa6f7 2024-11-18T02:32:50,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741865_1041 (size=117918) 2024-11-18T02:32:50,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741865_1041 (size=117918) 2024-11-18T02:32:50,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4c56ec95e65c42ea9947fab7e88aa6f7, entries=18, sequenceid=237, filesize=23.8 K 2024-11-18T02:32:50,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=9.46 KB/9684 for 954ac799c33efc422cbb0eeb28e56642 in 32ms, sequenceid=237, compaction requested=false 2024-11-18T02:32:50,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:50,161 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/e119351bf48c4b1a95ff8463d75af78f as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/e119351bf48c4b1a95ff8463d75af78f 2024-11-18T02:32:50,166 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction 
of 3 (all) file(s) in 954ac799c33efc422cbb0eeb28e56642/info of 954ac799c33efc422cbb0eeb28e56642 into e119351bf48c4b1a95ff8463d75af78f(size=115.2 K), total size for store is 139.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T02:32:50,166 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:50,167 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., storeName=954ac799c33efc422cbb0eeb28e56642/info, priority=13, startTime=1731897170126; duration=0sec 2024-11-18T02:32:50,167 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:50,167 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 954ac799c33efc422cbb0eeb28e56642:info 2024-11-18T02:32:50,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:50,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:51,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:51,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:52,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:52,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-18T02:32:52,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/ab174dd1db0f427eb29cc03bcbc74a02 is 1080, key is row0184/info:/1731897170128/Put/seqid=0 2024-11-18T02:32:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741866_1042 (size=15750) 2024-11-18T02:32:52,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741866_1042 (size=15750) 2024-11-18T02:32:52,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/ab174dd1db0f427eb29cc03bcbc74a02 2024-11-18T02:32:52,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/ab174dd1db0f427eb29cc03bcbc74a02 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ab174dd1db0f427eb29cc03bcbc74a02 2024-11-18T02:32:52,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ab174dd1db0f427eb29cc03bcbc74a02, entries=10, sequenceid=251, filesize=15.4 K 2024-11-18T02:32:52,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=12.61 KB/12912 for 954ac799c33efc422cbb0eeb28e56642 in 20ms, sequenceid=251, compaction requested=true 2024-11-18T02:32:52,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:52,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 954ac799c33efc422cbb0eeb28e56642:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T02:32:52,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:52,168 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T02:32:52,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:32:52,168 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-18T02:32:52,169 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158062 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T02:32:52,169 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): 954ac799c33efc422cbb0eeb28e56642/info is initiating minor compaction (all files) 2024-11-18T02:32:52,169 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 954ac799c33efc422cbb0eeb28e56642/info in TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:32:52,169 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/e119351bf48c4b1a95ff8463d75af78f, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4c56ec95e65c42ea9947fab7e88aa6f7, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ab174dd1db0f427eb29cc03bcbc74a02] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp, totalSize=154.4 K 2024-11-18T02:32:52,169 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting e119351bf48c4b1a95ff8463d75af78f, keycount=104, bloomtype=ROW, size=115.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731897151805 2024-11-18T02:32:52,170 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4c56ec95e65c42ea9947fab7e88aa6f7, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731897170080 2024-11-18T02:32:52,170 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting ab174dd1db0f427eb29cc03bcbc74a02, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731897170128 2024-11-18T02:32:52,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/62b3b3c56f2c4ef4b2e814b997227024 is 1080, key is row0194/info:/1731897172148/Put/seqid=0 2024-11-18T02:32:52,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741867_1043 (size=19010) 2024-11-18T02:32:52,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741867_1043 (size=19010) 2024-11-18T02:32:52,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=267 (bloomFilter=true), 
to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/62b3b3c56f2c4ef4b2e814b997227024 2024-11-18T02:32:52,192 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 954ac799c33efc422cbb0eeb28e56642#info#compaction#81 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:32:52,192 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/a0a8ab0537f84b77b1576bd53fd28e5d is 1080, key is row0062/info:/1731897151805/Put/seqid=0 2024-11-18T02:32:52,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/62b3b3c56f2c4ef4b2e814b997227024 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/62b3b3c56f2c4ef4b2e814b997227024 2024-11-18T02:32:52,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=954ac799c33efc422cbb0eeb28e56642, server=c4730a2bacf8,40979,1731897138825 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-18T02:32:52,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:39902 deadline: 1731897182198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=954ac799c33efc422cbb0eeb28e56642, server=c4730a2bacf8,40979,1731897138825 2024-11-18T02:32:52,199 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., hostname=c4730a2bacf8,40979,1731897138825, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., hostname=c4730a2bacf8,40979,1731897138825, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=954ac799c33efc422cbb0eeb28e56642, server=c4730a2bacf8,40979,1731897138825 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T02:32:52,200 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., hostname=c4730a2bacf8,40979,1731897138825, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=954ac799c33efc422cbb0eeb28e56642, server=c4730a2bacf8,40979,1731897138825 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T02:32:52,200 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., hostname=c4730a2bacf8,40979,1731897138825, seqNum=127 because the exception is null or not the one we care about 2024-11-18T02:32:52,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/62b3b3c56f2c4ef4b2e814b997227024, entries=13, sequenceid=267, filesize=18.6 K 2024-11-18T02:32:52,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741868_1044 (size=148409) 2024-11-18T02:32:52,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 954ac799c33efc422cbb0eeb28e56642 in 33ms, sequenceid=267, compaction requested=false 2024-11-18T02:32:52,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:52,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741868_1044 (size=148409) 2024-11-18T02:32:52,207 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/a0a8ab0537f84b77b1576bd53fd28e5d as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a0a8ab0537f84b77b1576bd53fd28e5d 2024-11-18T02:32:52,211 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 954ac799c33efc422cbb0eeb28e56642/info of 954ac799c33efc422cbb0eeb28e56642 into a0a8ab0537f84b77b1576bd53fd28e5d(size=144.9 K), total size for store is 163.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
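
The RegionTooBusyException recorded above is the region server refusing a put in HRegion.checkResources because the memstore for 954ac799c33efc422cbb0eeb28e56642 is over its blocking limit (32.0 K in this test, far below production defaults, consistent with the test deliberately forcing frequent flushes) while MemStoreFlusher.0 catches up. The async client then inspects the exception, decides the cached region location is still valid ("Will not update ... because the exception is null or not the one we care about"), and leaves the retry to its normal retry loop. Below is a minimal, illustrative client-side sketch of the same back-off idea using only the public HBase Java client API. The table name and column family come from this test; the row key, cell value, retry count, and sleep values are arbitrary assumptions, and in practice the stock client already retries RegionTooBusyException internally, so explicit handling like this is rarely needed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Illustrative sketch only: a writer that backs off when the region reports
 * RegionTooBusyException (memstore above its blocking limit), as seen in the
 * log above. Retry counts and sleep times are arbitrary assumptions, not
 * values taken from this test run.
 */
public class BackoffPutExample {
  private static final byte[] FAMILY = Bytes.toBytes("info");

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // Hypothetical row/value, sized roughly like the ~1 KB cells in this test.
      Put put = new Put(Bytes.toBytes("row0200"))
          .addColumn(FAMILY, Bytes.toBytes("q"), new byte[1024]);
      putWithBackoff(table, put, 5, 200L);
    }
  }

  /** Retries a Put a few times, sleeping between attempts while the region flushes. */
  static void putWithBackoff(Table table, Put put, int maxAttempts, long initialSleepMs)
      throws IOException, InterruptedException {
    long sleepMs = initialSleepMs;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up; the region is still above its memstore limit
        }
        Thread.sleep(sleepMs);                    // let MemStoreFlusher catch up
        sleepMs = Math.min(sleepMs * 2, 5_000L);  // exponential backoff, capped
      }
    }
  }
}

In the trace above the condition clears quickly: the flush at sequenceid=267 completes about 30 ms later and the subsequent compaction merges the three store files into a0a8ab0537f84b77b1576bd53fd28e5d, so a single short sleep would already be enough here.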
2024-11-18T02:32:52,211 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:32:52,211 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., storeName=954ac799c33efc422cbb0eeb28e56642/info, priority=13, startTime=1731897172168; duration=0sec 2024-11-18T02:32:52,211 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:32:52,211 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 954ac799c33efc422cbb0eeb28e56642:info 2024-11-18T02:32:52,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:52,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:53,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:53,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:54,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:54,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:55,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:55,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:56,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:56,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:57,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:57,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:58,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T02:32:58,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:59,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:59,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:32:59,641 INFO [master/c4730a2bacf8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 
2024-11-18T02:32:59,641 INFO [master/c4730a2bacf8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-18T02:33:00,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T02:33:00,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:01,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:01,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T02:33:02,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642
2024-11-18T02:33:02,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB
2024-11-18T02:33:02,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/a9b81725116c447698119c5027c36472 is 1080, key is row0207/info:/1731897172169/Put/seqid=0
2024-11-18T02:33:02,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741869_1045 (size=23333)
2024-11-18T02:33:02,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741869_1045 (size=23333)
2024-11-18T02:33:02,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/a9b81725116c447698119c5027c36472
2024-11-18T02:33:02,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/a9b81725116c447698119c5027c36472 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a9b81725116c447698119c5027c36472
2024-11-18T02:33:02,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a9b81725116c447698119c5027c36472, entries=17, sequenceid=288, filesize=22.8 K
2024-11-18T02:33:02,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=1.05 KB/1076 for 954ac799c33efc422cbb0eeb28e56642 in 22ms, sequenceid=288, compaction requested=true
2024-11-18T02:33:02,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642:
2024-11-18T02:33:02,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 954ac799c33efc422cbb0eeb28e56642:info, priority=-2147483648, current under compaction store size is 1
2024-11-18T02:33:02,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-18T02:33:02,267 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-18T02:33:02,268 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190752 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-18T02:33:02,268 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): 954ac799c33efc422cbb0eeb28e56642/info is initiating minor compaction (all files)
2024-11-18T02:33:02,268 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 954ac799c33efc422cbb0eeb28e56642/info in TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.
2024-11-18T02:33:02,268 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a0a8ab0537f84b77b1576bd53fd28e5d, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/62b3b3c56f2c4ef4b2e814b997227024, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a9b81725116c447698119c5027c36472] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp, totalSize=186.3 K
2024-11-18T02:33:02,269 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting a0a8ab0537f84b77b1576bd53fd28e5d, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731897151805
2024-11-18T02:33:02,269 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62b3b3c56f2c4ef4b2e814b997227024, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1731897172148
2024-11-18T02:33:02,269 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting a9b81725116c447698119c5027c36472, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731897172169
2024-11-18T02:33:02,282 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 954ac799c33efc422cbb0eeb28e56642#info#compaction#83 average throughput is 41.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-18T02:33:02,283 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/8c67a9faa41c4824931a7dad2e2be57e is 1080, key is row0062/info:/1731897151805/Put/seqid=0
2024-11-18T02:33:02,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741870_1046 (size=180886)
2024-11-18T02:33:02,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741870_1046 (size=180886)
2024-11-18T02:33:02,294 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/8c67a9faa41c4824931a7dad2e2be57e as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/8c67a9faa41c4824931a7dad2e2be57e
2024-11-18T02:33:02,301 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 954ac799c33efc422cbb0eeb28e56642/info of 954ac799c33efc422cbb0eeb28e56642 into 8c67a9faa41c4824931a7dad2e2be57e(size=176.6 K), total size for store is 176.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-18T02:33:02,301 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 954ac799c33efc422cbb0eeb28e56642:
2024-11-18T02:33:02,301 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., storeName=954ac799c33efc422cbb0eeb28e56642/info, priority=13, startTime=1731897182267; duration=0sec
2024-11-18T02:33:02,301 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-18T02:33:02,301 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 954ac799c33efc422cbb0eeb28e56642:info
2024-11-18T02:33:02,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:02,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:03,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:03,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T02:33:04,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642
2024-11-18T02:33:04,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-18T02:33:04,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/da5068de41a34acc97a287d0b90bada7 is 1080, key is row0224/info:/1731897182247/Put/seqid=0
2024-11-18T02:33:04,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741871_1047 (size=12523)
2024-11-18T02:33:04,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741871_1047 (size=12523)
2024-11-18T02:33:04,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/da5068de41a34acc97a287d0b90bada7
2024-11-18T02:33:04,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/da5068de41a34acc97a287d0b90bada7 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/da5068de41a34acc97a287d0b90bada7
2024-11-18T02:33:04,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/da5068de41a34acc97a287d0b90bada7, entries=7, sequenceid=299, filesize=12.2 K
2024-11-18T02:33:04,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 954ac799c33efc422cbb0eeb28e56642 in 21ms, sequenceid=299, compaction requested=false
2024-11-18T02:33:04,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642:
2024-11-18T02:33:04,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40979 {}] regionserver.HRegion(8855): Flush requested on 954ac799c33efc422cbb0eeb28e56642
2024-11-18T02:33:04,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-18T02:33:04,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/5216ec3467b5459dac98ad43d28d348f is 1080, key is row0231/info:/1731897184258/Put/seqid=0
2024-11-18T02:33:04,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741872_1048 (size=21171)
2024-11-18T02:33:04,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741872_1048 (size=21171)
2024-11-18T02:33:04,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/5216ec3467b5459dac98ad43d28d348f
2024-11-18T02:33:04,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/5216ec3467b5459dac98ad43d28d348f as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/5216ec3467b5459dac98ad43d28d348f
2024-11-18T02:33:04,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/5216ec3467b5459dac98ad43d28d348f, entries=15, sequenceid=317, filesize=20.7 K
2024-11-18T02:33:04,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 954ac799c33efc422cbb0eeb28e56642 in 21ms, sequenceid=317, compaction requested=true
2024-11-18T02:33:04,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642:
2024-11-18T02:33:04,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 954ac799c33efc422cbb0eeb28e56642:info, priority=-2147483648, current under compaction store size is 1
2024-11-18T02:33:04,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-18T02:33:04,299 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-18T02:33:04,300 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 214580 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-18T02:33:04,300 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1541): 954ac799c33efc422cbb0eeb28e56642/info is initiating minor compaction (all files)
2024-11-18T02:33:04,300 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 954ac799c33efc422cbb0eeb28e56642/info in TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.
2024-11-18T02:33:04,300 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/8c67a9faa41c4824931a7dad2e2be57e, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/da5068de41a34acc97a287d0b90bada7, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/5216ec3467b5459dac98ad43d28d348f] into tmpdir=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp, totalSize=209.6 K
2024-11-18T02:33:04,301 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8c67a9faa41c4824931a7dad2e2be57e, keycount=162, bloomtype=ROW, size=176.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731897151805
2024-11-18T02:33:04,301 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting da5068de41a34acc97a287d0b90bada7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731897182247
2024-11-18T02:33:04,301 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5216ec3467b5459dac98ad43d28d348f, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1731897184258
2024-11-18T02:33:04,313 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 954ac799c33efc422cbb0eeb28e56642#info#compaction#86 average throughput is 62.94 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T02:33:04,314 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/f812655cacd24748920804247b8f0831 is 1080, key is row0062/info:/1731897151805/Put/seqid=0 2024-11-18T02:33:04,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741873_1049 (size=204803) 2024-11-18T02:33:04,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741873_1049 (size=204803) 2024-11-18T02:33:04,322 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/f812655cacd24748920804247b8f0831 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/f812655cacd24748920804247b8f0831 2024-11-18T02:33:04,328 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 954ac799c33efc422cbb0eeb28e56642/info of 954ac799c33efc422cbb0eeb28e56642 into f812655cacd24748920804247b8f0831(size=200.0 K), total size for store is 200.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T02:33:04,328 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:33:04,328 INFO [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., storeName=954ac799c33efc422cbb0eeb28e56642/info, priority=13, startTime=1731897184299; duration=0sec 2024-11-18T02:33:04,328 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T02:33:04,328 DEBUG [RS:0;c4730a2bacf8:40979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 954ac799c33efc422cbb0eeb28e56642:info 2024-11-18T02:33:04,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:04,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:04,583 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-18T02:33:05,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:05,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:06,296 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-18T02:33:06,297 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C40979%2C1731897138825.1731897186296 2024-11-18T02:33:06,303 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,303 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,303 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,303 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,303 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,303 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897139204 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897186296 2024-11-18T02:33:06,304 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38699:38699),(127.0.0.1/127.0.0.1:44143:44143)] 2024-11-18T02:33:06,304 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897139204 is not closed yet, will try archiving it next time 2024-11-18T02:33:06,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741833_1009 (size=315283) 2024-11-18T02:33:06,305 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741833_1009 (size=315283) 2024-11-18T02:33:06,307 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 03d4cf87ce3947d73844325f00579c2a: 2024-11-18T02:33:06,308 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 954ac799c33efc422cbb0eeb28e56642 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T02:33:06,311 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/740d8b2a817f4a06a8dfae56fa11d525 is 1080, key is row0246/info:/1731897184279/Put/seqid=0 2024-11-18T02:33:06,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741875_1051 (size=16839) 2024-11-18T02:33:06,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741875_1051 (size=16839) 2024-11-18T02:33:06,317 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/740d8b2a817f4a06a8dfae56fa11d525 2024-11-18T02:33:06,322 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/.tmp/info/740d8b2a817f4a06a8dfae56fa11d525 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/740d8b2a817f4a06a8dfae56fa11d525 2024-11-18T02:33:06,326 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/740d8b2a817f4a06a8dfae56fa11d525, entries=11, sequenceid=332, filesize=16.4 K 2024-11-18T02:33:06,327 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for 954ac799c33efc422cbb0eeb28e56642 in 20ms, sequenceid=332, compaction requested=false 2024-11-18T02:33:06,327 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 954ac799c33efc422cbb0eeb28e56642: 2024-11-18T02:33:06,327 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-18T02:33:06,331 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/info/1cc3bf720f974df7a2c28003c3d902b6 is 193, key is TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642./info:regioninfo/1731897154581/Put/seqid=0 2024-11-18T02:33:06,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741876_1052 (size=6223) 2024-11-18T02:33:06,335 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741876_1052 (size=6223) 2024-11-18T02:33:06,336 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/info/1cc3bf720f974df7a2c28003c3d902b6 2024-11-18T02:33:06,341 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/.tmp/info/1cc3bf720f974df7a2c28003c3d902b6 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/info/1cc3bf720f974df7a2c28003c3d902b6 2024-11-18T02:33:06,345 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/info/1cc3bf720f974df7a2c28003c3d902b6, entries=5, sequenceid=21, filesize=6.1 K 2024-11-18T02:33:06,346 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-11-18T02:33:06,346 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T02:33:06,346 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C40979%2C1731897138825.1731897186346 2024-11-18T02:33:06,353 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,353 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,353 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,353 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,353 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,353 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897186296 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897186346 2024-11-18T02:33:06,354 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38699:38699),(127.0.0.1/127.0.0.1:44143:44143)] 2024-11-18T02:33:06,354 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897186296 is not closed yet, will try archiving it next time 2024-11-18T02:33:06,354 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897139204 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/oldWALs/c4730a2bacf8%2C40979%2C1731897138825.1731897139204 2024-11-18T02:33:06,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741874_1050 (size=731) 
2024-11-18T02:33:06,355 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T02:33:06,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741874_1050 (size=731) 2024-11-18T02:33:06,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:06,356 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/WALs/c4730a2bacf8,40979,1731897138825/c4730a2bacf8%2C40979%2C1731897138825.1731897186296 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/oldWALs/c4730a2bacf8%2C40979%2C1731897138825.1731897186296 2024-11-18T02:33:06,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:06,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T02:33:06,455 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T02:33:06,455 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:33:06,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:06,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:06,456 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T02:33:06,456 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T02:33:06,456 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=531359646, stopped=false 2024-11-18T02:33:06,456 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c4730a2bacf8,33897,1731897138784 2024-11-18T02:33:06,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:33:06,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:33:06,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:06,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:06,458 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:33:06,458 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T02:33:06,459 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:33:06,459 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:33:06,459 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:06,459 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:33:06,459 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c4730a2bacf8,40979,1731897138825' ***** 2024-11-18T02:33:06,459 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T02:33:06,459 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T02:33:06,460 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(3091): Received CLOSE for 03d4cf87ce3947d73844325f00579c2a 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(3091): Received CLOSE for 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(959): stopping server c4730a2bacf8,40979,1731897138825 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:33:06,460 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 03d4cf87ce3947d73844325f00579c2a, disabling compactions & flushes 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c4730a2bacf8:40979. 2024-11-18T02:33:06,460 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 2024-11-18T02:33:06,460 DEBUG [RS:0;c4730a2bacf8:40979 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:33:06,460 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 2024-11-18T02:33:06,460 DEBUG [RS:0;c4730a2bacf8:40979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:06,460 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. after waiting 0 ms 2024-11-18T02:33:06,460 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T02:33:06,460 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T02:33:06,461 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-18T02:33:06,461 DEBUG [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1325): Online Regions={03d4cf87ce3947d73844325f00579c2a=TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a., 954ac799c33efc422cbb0eeb28e56642=TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T02:33:06,461 DEBUG [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1351): Waiting on 03d4cf87ce3947d73844325f00579c2a, 1588230740, 954ac799c33efc422cbb0eeb28e56642 2024-11-18T02:33:06,461 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:33:06,461 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:33:06,461 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:33:06,461 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:33:06,461 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:33:06,461 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0->hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223-bottom] to archive 2024-11-18T02:33:06,462 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T02:33:06,463 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:33:06,464 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c4730a2bacf8:33897 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-18T02:33:06,464 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-18T02:33:06,466 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-18T02:33:06,466 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:33:06,466 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:33:06,466 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897186461Running coprocessor pre-close hooks at 1731897186461Disabling compacts and flushes for region at 1731897186461Disabling writes for close at 1731897186461Writing region close event to WAL at 1731897186462 (+1 ms)Running coprocessor post-close hooks at 1731897186466 (+4 ms)Closed at 1731897186466 2024-11-18T02:33:06,466 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T02:33:06,467 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/03d4cf87ce3947d73844325f00579c2a/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-18T02:33:06,468 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 
2024-11-18T02:33:06,468 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 03d4cf87ce3947d73844325f00579c2a: Waiting for close lock at 1731897186460Running coprocessor pre-close hooks at 1731897186460Disabling compacts and flushes for region at 1731897186460Disabling writes for close at 1731897186460Writing region close event to WAL at 1731897186464 (+4 ms)Running coprocessor post-close hooks at 1731897186468 (+4 ms)Closed at 1731897186468 2024-11-18T02:33:06,468 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731897153882.03d4cf87ce3947d73844325f00579c2a. 2024-11-18T02:33:06,468 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 954ac799c33efc422cbb0eeb28e56642, disabling compactions & flushes 2024-11-18T02:33:06,468 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:33:06,468 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:33:06,468 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. after waiting 0 ms 2024-11-18T02:33:06,468 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 
2024-11-18T02:33:06,468 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0->hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/bd839c3b8493b1fa541c64e468513ae0/info/fab4b7fc30d44b50949044d67c644223-top, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-b10701c4f8f14baeb73c698b31966b4f, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-51813917216e40bab300d8caa2929d24, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d5ae6d66d067434c9aa12cf97ed7b75a, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-d37c27c6609e49c3bb75c165d04234a5, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4a6fbbb0fa134954bbe0d6198f95bff7, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d13c6dde521d434095efe8cc461fd048, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b470d629d30f4baf999342387d118601, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/900a0bbc69534c1d98f585b56af20837, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/db275e4b237c447992f9f95e741c13f4, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ef3428a2b98344e2bc0e42e2fd5edccc, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b904586ceffe4d61a9e8c1c9806f6999, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/e119351bf48c4b1a95ff8463d75af78f, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/6c1a4ef0d73643a5b8601d6177a17a2b, 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4c56ec95e65c42ea9947fab7e88aa6f7, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a0a8ab0537f84b77b1576bd53fd28e5d, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ab174dd1db0f427eb29cc03bcbc74a02, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/62b3b3c56f2c4ef4b2e814b997227024, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/8c67a9faa41c4824931a7dad2e2be57e, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a9b81725116c447698119c5027c36472, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/da5068de41a34acc97a287d0b90bada7, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/5216ec3467b5459dac98ad43d28d348f] to archive 2024-11-18T02:33:06,469 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T02:33:06,470 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/fab4b7fc30d44b50949044d67c644223.bd839c3b8493b1fa541c64e468513ae0 2024-11-18T02:33:06,472 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-b10701c4f8f14baeb73c698b31966b4f to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-b10701c4f8f14baeb73c698b31966b4f 2024-11-18T02:33:06,473 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-51813917216e40bab300d8caa2929d24 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-51813917216e40bab300d8caa2929d24 2024-11-18T02:33:06,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d5ae6d66d067434c9aa12cf97ed7b75a to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d5ae6d66d067434c9aa12cf97ed7b75a 2024-11-18T02:33:06,475 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-d37c27c6609e49c3bb75c165d04234a5 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/TestLogRolling-testLogRolling=bd839c3b8493b1fa541c64e468513ae0-d37c27c6609e49c3bb75c165d04234a5 2024-11-18T02:33:06,476 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4a6fbbb0fa134954bbe0d6198f95bff7 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4a6fbbb0fa134954bbe0d6198f95bff7 2024-11-18T02:33:06,477 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d13c6dde521d434095efe8cc461fd048 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/d13c6dde521d434095efe8cc461fd048 2024-11-18T02:33:06,478 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b470d629d30f4baf999342387d118601 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b470d629d30f4baf999342387d118601 2024-11-18T02:33:06,479 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/900a0bbc69534c1d98f585b56af20837 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/900a0bbc69534c1d98f585b56af20837 2024-11-18T02:33:06,480 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/db275e4b237c447992f9f95e741c13f4 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/db275e4b237c447992f9f95e741c13f4 2024-11-18T02:33:06,481 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ef3428a2b98344e2bc0e42e2fd5edccc to 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ef3428a2b98344e2bc0e42e2fd5edccc 2024-11-18T02:33:06,482 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b904586ceffe4d61a9e8c1c9806f6999 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/b904586ceffe4d61a9e8c1c9806f6999 2024-11-18T02:33:06,483 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/e119351bf48c4b1a95ff8463d75af78f to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/e119351bf48c4b1a95ff8463d75af78f 2024-11-18T02:33:06,484 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/6c1a4ef0d73643a5b8601d6177a17a2b to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/6c1a4ef0d73643a5b8601d6177a17a2b 2024-11-18T02:33:06,485 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4c56ec95e65c42ea9947fab7e88aa6f7 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/4c56ec95e65c42ea9947fab7e88aa6f7 2024-11-18T02:33:06,486 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a0a8ab0537f84b77b1576bd53fd28e5d to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a0a8ab0537f84b77b1576bd53fd28e5d 2024-11-18T02:33:06,487 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ab174dd1db0f427eb29cc03bcbc74a02 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/ab174dd1db0f427eb29cc03bcbc74a02 2024-11-18T02:33:06,487 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/62b3b3c56f2c4ef4b2e814b997227024 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/62b3b3c56f2c4ef4b2e814b997227024 2024-11-18T02:33:06,488 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/8c67a9faa41c4824931a7dad2e2be57e to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/8c67a9faa41c4824931a7dad2e2be57e 2024-11-18T02:33:06,489 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a9b81725116c447698119c5027c36472 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/a9b81725116c447698119c5027c36472 2024-11-18T02:33:06,490 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/da5068de41a34acc97a287d0b90bada7 to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/da5068de41a34acc97a287d0b90bada7 2024-11-18T02:33:06,491 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/5216ec3467b5459dac98ad43d28d348f to hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/archive/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/info/5216ec3467b5459dac98ad43d28d348f 2024-11-18T02:33:06,491 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [d5ae6d66d067434c9aa12cf97ed7b75a=43081, 4a6fbbb0fa134954bbe0d6198f95bff7=12516, d13c6dde521d434095efe8cc461fd048=69123, b470d629d30f4baf999342387d118601=23316, 900a0bbc69534c1d98f585b56af20837=14672, db275e4b237c447992f9f95e741c13f4=96252, ef3428a2b98344e2bc0e42e2fd5edccc=22238, b904586ceffe4d61a9e8c1c9806f6999=19000, e119351bf48c4b1a95ff8463d75af78f=117918, 6c1a4ef0d73643a5b8601d6177a17a2b=12516, 4c56ec95e65c42ea9947fab7e88aa6f7=24394, a0a8ab0537f84b77b1576bd53fd28e5d=148409, ab174dd1db0f427eb29cc03bcbc74a02=15750, 62b3b3c56f2c4ef4b2e814b997227024=19010, 8c67a9faa41c4824931a7dad2e2be57e=180886, a9b81725116c447698119c5027c36472=23333, da5068de41a34acc97a287d0b90bada7=12523, 5216ec3467b5459dac98ad43d28d348f=21171] 2024-11-18T02:33:06,495 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/data/default/TestLogRolling-testLogRolling/954ac799c33efc422cbb0eeb28e56642/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-18T02:33:06,495 INFO [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:33:06,495 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 954ac799c33efc422cbb0eeb28e56642: Waiting for close lock at 1731897186468Running coprocessor pre-close hooks at 1731897186468Disabling compacts and flushes for region at 1731897186468Disabling writes for close at 1731897186468Writing region close event to WAL at 1731897186492 (+24 ms)Running coprocessor post-close hooks at 1731897186495 (+3 ms)Closed at 1731897186495 2024-11-18T02:33:06,495 DEBUG [RS_CLOSE_REGION-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731897153882.954ac799c33efc422cbb0eeb28e56642. 2024-11-18T02:33:06,661 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(976): stopping server c4730a2bacf8,40979,1731897138825; all regions closed. 
2024-11-18T02:33:06,661 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,661 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,662 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,662 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,662 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741834_1010 (size=8107) 2024-11-18T02:33:06,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741834_1010 (size=8107) 2024-11-18T02:33:06,666 DEBUG [RS:0;c4730a2bacf8:40979 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/oldWALs 2024-11-18T02:33:06,666 INFO [RS:0;c4730a2bacf8:40979 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C40979%2C1731897138825.meta:.meta(num 1731897139576) 2024-11-18T02:33:06,666 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,666 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,666 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,666 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,667 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741877_1053 (size=780) 2024-11-18T02:33:06,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741877_1053 (size=780) 2024-11-18T02:33:06,670 DEBUG [RS:0;c4730a2bacf8:40979 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/oldWALs 2024-11-18T02:33:06,670 INFO [RS:0;c4730a2bacf8:40979 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C40979%2C1731897138825:(num 1731897186346) 2024-11-18T02:33:06,670 DEBUG [RS:0;c4730a2bacf8:40979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:06,670 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:33:06,670 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:33:06,670 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.ChoreService(370): Chore service for: regionserver/c4730a2bacf8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T02:33:06,671 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:33:06,671 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T02:33:06,671 INFO [RS:0;c4730a2bacf8:40979 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40979 2024-11-18T02:33:06,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c4730a2bacf8,40979,1731897138825 2024-11-18T02:33:06,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:33:06,673 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:33:06,675 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c4730a2bacf8,40979,1731897138825] 2024-11-18T02:33:06,676 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c4730a2bacf8,40979,1731897138825 already deleted, retry=false 2024-11-18T02:33:06,676 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c4730a2bacf8,40979,1731897138825 expired; onlineServers=0 2024-11-18T02:33:06,676 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c4730a2bacf8,33897,1731897138784' ***** 2024-11-18T02:33:06,676 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T02:33:06,676 INFO [M:0;c4730a2bacf8:33897 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:33:06,676 INFO [M:0;c4730a2bacf8:33897 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:33:06,676 DEBUG [M:0;c4730a2bacf8:33897 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T02:33:06,676 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T02:33:06,676 DEBUG [M:0;c4730a2bacf8:33897 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T02:33:06,676 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897138979 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897138979,5,FailOnTimeoutGroup] 2024-11-18T02:33:06,676 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897138979 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897138979,5,FailOnTimeoutGroup] 2024-11-18T02:33:06,677 INFO [M:0;c4730a2bacf8:33897 {}] hbase.ChoreService(370): Chore service for: master/c4730a2bacf8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T02:33:06,677 INFO [M:0;c4730a2bacf8:33897 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:33:06,677 DEBUG [M:0;c4730a2bacf8:33897 {}] master.HMaster(1795): Stopping service threads 2024-11-18T02:33:06,677 INFO [M:0;c4730a2bacf8:33897 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T02:33:06,677 INFO [M:0;c4730a2bacf8:33897 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:33:06,677 INFO [M:0;c4730a2bacf8:33897 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T02:33:06,677 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T02:33:06,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T02:33:06,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:06,678 DEBUG [M:0;c4730a2bacf8:33897 {}] zookeeper.ZKUtil(347): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T02:33:06,678 WARN [M:0;c4730a2bacf8:33897 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T02:33:06,679 INFO [M:0;c4730a2bacf8:33897 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/.lastflushedseqids 2024-11-18T02:33:06,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741878_1054 (size=228) 2024-11-18T02:33:06,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741878_1054 (size=228) 2024-11-18T02:33:06,684 INFO [M:0;c4730a2bacf8:33897 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T02:33:06,684 INFO [M:0;c4730a2bacf8:33897 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T02:33:06,684 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:33:06,684 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:06,684 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:06,684 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:33:06,684 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:06,684 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.68 KB heapSize=65.90 KB 2024-11-18T02:33:06,700 DEBUG [M:0;c4730a2bacf8:33897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/673a6f95162140bdb543085c5ed21fba is 82, key is hbase:meta,,1/info:regioninfo/1731897139598/Put/seqid=0 2024-11-18T02:33:06,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741879_1055 (size=5672) 2024-11-18T02:33:06,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741879_1055 (size=5672) 2024-11-18T02:33:06,704 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/673a6f95162140bdb543085c5ed21fba 2024-11-18T02:33:06,722 DEBUG [M:0;c4730a2bacf8:33897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93a96e7eefd74e43a8366e9f061a77ea is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731897140017/Put/seqid=0 2024-11-18T02:33:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741880_1056 (size=7679) 2024-11-18T02:33:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741880_1056 (size=7679) 2024-11-18T02:33:06,727 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.08 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93a96e7eefd74e43a8366e9f061a77ea 2024-11-18T02:33:06,731 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 93a96e7eefd74e43a8366e9f061a77ea 2024-11-18T02:33:06,746 DEBUG [M:0;c4730a2bacf8:33897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6aaf12992270493c95a16f0cd055c8af is 69, key is c4730a2bacf8,40979,1731897138825/rs:state/1731897139061/Put/seqid=0 2024-11-18T02:33:06,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741881_1057 (size=5156) 2024-11-18T02:33:06,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741881_1057 (size=5156) 2024-11-18T02:33:06,751 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6aaf12992270493c95a16f0cd055c8af 2024-11-18T02:33:06,769 DEBUG [M:0;c4730a2bacf8:33897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f6050ffff9ef4bdbb727ba0fe7d14286 is 52, key is load_balancer_on/state:d/1731897139650/Put/seqid=0 2024-11-18T02:33:06,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741882_1058 (size=5056) 2024-11-18T02:33:06,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741882_1058 (size=5056) 2024-11-18T02:33:06,774 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f6050ffff9ef4bdbb727ba0fe7d14286 2024-11-18T02:33:06,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:33:06,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40979-0x10128eb08e30001, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:33:06,775 INFO [RS:0;c4730a2bacf8:40979 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:33:06,775 INFO [RS:0;c4730a2bacf8:40979 {}] regionserver.HRegionServer(1031): Exiting; stopping=c4730a2bacf8,40979,1731897138825; zookeeper connection closed. 
2024-11-18T02:33:06,775 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@55252641 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@55252641 2024-11-18T02:33:06,776 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T02:33:06,779 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/673a6f95162140bdb543085c5ed21fba as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/673a6f95162140bdb543085c5ed21fba 2024-11-18T02:33:06,784 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/673a6f95162140bdb543085c5ed21fba, entries=8, sequenceid=129, filesize=5.5 K 2024-11-18T02:33:06,784 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93a96e7eefd74e43a8366e9f061a77ea as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/93a96e7eefd74e43a8366e9f061a77ea 2024-11-18T02:33:06,789 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 93a96e7eefd74e43a8366e9f061a77ea 2024-11-18T02:33:06,789 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/93a96e7eefd74e43a8366e9f061a77ea, entries=14, sequenceid=129, filesize=7.5 K 2024-11-18T02:33:06,790 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6aaf12992270493c95a16f0cd055c8af as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6aaf12992270493c95a16f0cd055c8af 2024-11-18T02:33:06,794 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6aaf12992270493c95a16f0cd055c8af, entries=1, sequenceid=129, filesize=5.0 K 2024-11-18T02:33:06,795 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f6050ffff9ef4bdbb727ba0fe7d14286 as hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f6050ffff9ef4bdbb727ba0fe7d14286 2024-11-18T02:33:06,799 INFO [M:0;c4730a2bacf8:33897 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38321/user/jenkins/test-data/d7cd0786-b42c-8751-da70-d8e8d0270264/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f6050ffff9ef4bdbb727ba0fe7d14286, entries=1, sequenceid=129, filesize=4.9 K 2024-11-18T02:33:06,800 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.68 KB/54973, heapSize ~65.84 KB/67416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=129, compaction requested=false 2024-11-18T02:33:06,802 INFO [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:06,802 DEBUG [M:0;c4730a2bacf8:33897 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897186684Disabling compacts and flushes for region at 1731897186684Disabling writes for close at 1731897186684Obtaining lock to block concurrent updates at 1731897186684Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731897186684Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54973, getHeapSize=67416, getOffHeapSize=0, getCellsCount=152 at 1731897186684Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731897186685 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731897186685Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731897186699 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731897186699Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731897186708 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731897186722 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731897186722Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731897186732 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731897186745 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731897186745Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731897186755 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731897186768 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731897186768Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bbb1ed2: reopening flushed file at 1731897186779 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f58665e: reopening flushed file at 1731897186784 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35813ff5: reopening flushed file at 1731897186789 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2be44391: reopening flushed file at 1731897186794 (+5 ms)Finished flush of dataSize ~53.68 KB/54973, heapSize ~65.84 KB/67416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=129, compaction requested=false at 1731897186800 (+6 ms)Writing region close event to WAL at 1731897186802 (+2 ms)Closed at 1731897186802 2024-11-18T02:33:06,806 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,806 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,806 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,806 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,806 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:06,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41201 is added to blk_1073741830_1006 (size=63903) 2024-11-18T02:33:06,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44657 is added to blk_1073741830_1006 (size=63903) 2024-11-18T02:33:06,809 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T02:33:06,809 INFO [M:0;c4730a2bacf8:33897 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T02:33:06,809 INFO [M:0;c4730a2bacf8:33897 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33897 2024-11-18T02:33:06,809 INFO [M:0;c4730a2bacf8:33897 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:33:06,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:33:06,911 INFO [M:0;c4730a2bacf8:33897 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:33:06,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33897-0x10128eb08e30000, quorum=127.0.0.1:58446, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:33:06,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4445ac53{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:33:06,914 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64358886{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:33:06,914 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:33:06,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e4bbe36{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:33:06,914 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1204fb24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.log.dir/,STOPPED} 2024-11-18T02:33:06,916 WARN [BP-505935065-172.17.0.2-1731897138112 heartbeating to localhost/127.0.0.1:38321 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:33:06,916 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:33:06,916 WARN [BP-505935065-172.17.0.2-1731897138112 heartbeating to localhost/127.0.0.1:38321 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-505935065-172.17.0.2-1731897138112 (Datanode Uuid 20b1e98a-2cf5-4f9e-83a9-8cf40e588edd) service to localhost/127.0.0.1:38321 2024-11-18T02:33:06,916 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:33:06,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/data/data3/current/BP-505935065-172.17.0.2-1731897138112 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:33:06,916 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/data/data4/current/BP-505935065-172.17.0.2-1731897138112 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:33:06,917 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:33:06,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@719b1e37{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:33:06,920 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34accf12{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:33:06,920 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:33:06,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3868302b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:33:06,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@412b5320{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.log.dir/,STOPPED} 2024-11-18T02:33:06,922 WARN [BP-505935065-172.17.0.2-1731897138112 heartbeating to localhost/127.0.0.1:38321 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:33:06,922 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:33:06,922 WARN [BP-505935065-172.17.0.2-1731897138112 heartbeating to localhost/127.0.0.1:38321 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-505935065-172.17.0.2-1731897138112 (Datanode Uuid b7dad9ef-5a9a-4db9-ba3b-9ed5cd025541) service to localhost/127.0.0.1:38321 2024-11-18T02:33:06,922 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:33:06,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/data/data1/current/BP-505935065-172.17.0.2-1731897138112 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:33:06,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/cluster_5b65d94e-3a9e-c0d8-e1f6-9d0cc76c1e01/data/data2/current/BP-505935065-172.17.0.2-1731897138112 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:33:06,923 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:33:06,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3bc081d8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:33:06,929 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63e2e387{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:33:06,929 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:33:06,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2124b505{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:33:06,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4181d37d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.log.dir/,STOPPED} 2024-11-18T02:33:06,936 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T02:33:06,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T02:33:06,975 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=233 (was 207) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38321 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38321 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38321 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38321 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38321 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38321 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:38321 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:38321 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38321 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=42 (was 44), ProcessCount=11 (was 11), AvailableMemoryMB=2804 (was 2865) 2024-11-18T02:33:06,983 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=233, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=42, ProcessCount=11, AvailableMemoryMB=2805 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.log.dir so I do NOT create it in target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59876bf2-74e6-6c54-24fc-19505e8c6b1f/hadoop.tmp.dir so I do NOT create it in target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2, deleteOnExit=true 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/test.cache.data in system properties and HBase conf 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/hadoop.log.dir in system properties and HBase conf 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T02:33:06,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T02:33:06,985 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/nfs.dump.dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/java.io.tmpdir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T02:33:06,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T02:33:06,998 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:33:07,063 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:33:07,067 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:33:07,068 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:33:07,068 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:33:07,068 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:33:07,069 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:33:07,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e290644{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:33:07,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ac034ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:33:07,074 INFO [regionserver/c4730a2bacf8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:33:07,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c0b3275{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/java.io.tmpdir/jetty-localhost-35893-hadoop-hdfs-3_4_1-tests_jar-_-any-15590088034844422196/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:33:07,182 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d7cc0b1{HTTP/1.1, (http/1.1)}{localhost:35893} 2024-11-18T02:33:07,183 INFO [Time-limited test {}] server.Server(415): Started @285639ms 2024-11-18T02:33:07,195 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T02:33:07,248 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:33:07,251 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:33:07,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:33:07,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:33:07,251 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T02:33:07,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f9090c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:33:07,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60fdf071{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:33:07,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:07,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:07,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17951be7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/java.io.tmpdir/jetty-localhost-38175-hadoop-hdfs-3_4_1-tests_jar-_-any-18266810947864015510/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:33:07,367 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e818af2{HTTP/1.1, (http/1.1)}{localhost:38175} 2024-11-18T02:33:07,367 INFO [Time-limited test {}] server.Server(415): Started @285824ms 2024-11-18T02:33:07,369 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T02:33:07,397 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T02:33:07,400 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T02:33:07,400 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T02:33:07,400 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T02:33:07,400 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T02:33:07,401 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a1af98b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/hadoop.log.dir/,AVAILABLE} 2024-11-18T02:33:07,401 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bb82bb7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T02:33:07,470 WARN [Thread-2468 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/data/data2/current/BP-1855444151-172.17.0.2-1731897187004/current, will proceed with Du for space computation calculation, 2024-11-18T02:33:07,470 WARN [Thread-2467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/data/data1/current/BP-1855444151-172.17.0.2-1731897187004/current, will proceed with Du for space computation calculation, 2024-11-18T02:33:07,491 WARN [Thread-2446 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T02:33:07,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf08a2124340227a8 with lease ID 0x98f75a872616d200: Processing first storage report for DS-d36eef8e-8b29-48d7-81a7-a34d139a96bb from datanode DatanodeRegistration(127.0.0.1:44889, datanodeUuid=571c92b7-60b0-43b7-899e-d9df4e767393, infoPort=42829, infoSecurePort=0, ipcPort=39327, storageInfo=lv=-57;cid=testClusterID;nsid=1374016301;c=1731897187004) 2024-11-18T02:33:07,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf08a2124340227a8 with lease ID 0x98f75a872616d200: from storage DS-d36eef8e-8b29-48d7-81a7-a34d139a96bb node DatanodeRegistration(127.0.0.1:44889, datanodeUuid=571c92b7-60b0-43b7-899e-d9df4e767393, infoPort=42829, infoSecurePort=0, ipcPort=39327, storageInfo=lv=-57;cid=testClusterID;nsid=1374016301;c=1731897187004), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:33:07,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf08a2124340227a8 with lease ID 0x98f75a872616d200: Processing first storage report for DS-4504aba6-5514-4e2c-8e57-f0296a3a0de1 from datanode DatanodeRegistration(127.0.0.1:44889, datanodeUuid=571c92b7-60b0-43b7-899e-d9df4e767393, infoPort=42829, infoSecurePort=0, ipcPort=39327, storageInfo=lv=-57;cid=testClusterID;nsid=1374016301;c=1731897187004) 2024-11-18T02:33:07,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf08a2124340227a8 with lease ID 0x98f75a872616d200: from storage DS-4504aba6-5514-4e2c-8e57-f0296a3a0de1 node DatanodeRegistration(127.0.0.1:44889, datanodeUuid=571c92b7-60b0-43b7-899e-d9df4e767393, infoPort=42829, infoSecurePort=0, ipcPort=39327, storageInfo=lv=-57;cid=testClusterID;nsid=1374016301;c=1731897187004), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:33:07,525 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23e03366{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/java.io.tmpdir/jetty-localhost-35337-hadoop-hdfs-3_4_1-tests_jar-_-any-6969956475047213842/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:33:07,525 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fb7753b{HTTP/1.1, (http/1.1)}{localhost:35337} 2024-11-18T02:33:07,525 INFO [Time-limited test {}] server.Server(415): Started @285981ms 2024-11-18T02:33:07,526 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
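The startup logged above is driven by HBaseTestingUtil with the option string printed at 02:33:06,984 (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}): it sets the hadoop.* and test-data directories into system properties, brings up a mini DFS with two datanodes, and then the HBase processes. The following is only a rough sketch of how a test such as TestLogRolling drives that API; the builder and method names follow the HBaseTestingUtil / StartMiniClusterOption classes named in the log, but exact signatures on this branch may differ.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option string printed by HBaseTestingUtil(805) above:
    // one master, one region server, two HDFS datanodes, one ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    // Expected to start ZooKeeper, HDFS (the "STARTING DFS" phase above) and
    // HBase, pointing hadoop.log.dir, hadoop.tmp.dir, etc. at test-data dirs.
    util.startMiniCluster(option);
    try {
      // ... exercise the cluster, e.g. roll WALs as TestLogRolling does ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}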
2024-11-18T02:33:07,615 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/data/data3/current/BP-1855444151-172.17.0.2-1731897187004/current, will proceed with Du for space computation calculation, 2024-11-18T02:33:07,616 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/data/data4/current/BP-1855444151-172.17.0.2-1731897187004/current, will proceed with Du for space computation calculation, 2024-11-18T02:33:07,631 WARN [Thread-2482 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T02:33:07,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9e56c3426dc4b818 with lease ID 0x98f75a872616d201: Processing first storage report for DS-f5579d55-728a-4bbc-8e13-8adfcaac6465 from datanode DatanodeRegistration(127.0.0.1:34605, datanodeUuid=f037842b-4864-4b36-98b0-2730ca785b0e, infoPort=36763, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=1374016301;c=1731897187004) 2024-11-18T02:33:07,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e56c3426dc4b818 with lease ID 0x98f75a872616d201: from storage DS-f5579d55-728a-4bbc-8e13-8adfcaac6465 node DatanodeRegistration(127.0.0.1:34605, datanodeUuid=f037842b-4864-4b36-98b0-2730ca785b0e, infoPort=36763, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=1374016301;c=1731897187004), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:33:07,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9e56c3426dc4b818 with lease ID 0x98f75a872616d201: Processing first storage report for DS-9a099166-9513-444c-9625-bfa19e43aa67 from datanode DatanodeRegistration(127.0.0.1:34605, datanodeUuid=f037842b-4864-4b36-98b0-2730ca785b0e, infoPort=36763, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=1374016301;c=1731897187004) 2024-11-18T02:33:07,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e56c3426dc4b818 with lease ID 0x98f75a872616d201: from storage DS-9a099166-9513-444c-9625-bfa19e43aa67 node DatanodeRegistration(127.0.0.1:34605, datanodeUuid=f037842b-4864-4b36-98b0-2730ca785b0e, infoPort=36763, infoSecurePort=0, ipcPort=33807, storageInfo=lv=-57;cid=testClusterID;nsid=1374016301;c=1731897187004), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T02:33:07,647 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707 2024-11-18T02:33:07,650 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/zookeeper_0, clientPort=52545, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T02:33:07,650 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52545 2024-11-18T02:33:07,651 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:33:07,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:33:07,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:33:07,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741825_1001 (size=7) 2024-11-18T02:33:07,660 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803 with version=8 2024-11-18T02:33:07,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33317/user/jenkins/test-data/a877179e-35ad-e1c1-ccb8-a6868c560dd4/hbase-staging 2024-11-18T02:33:07,663 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:33:07,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:33:07,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:33:07,663 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:33:07,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:33:07,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:33:07,663 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T02:33:07,663 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:33:07,664 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33589 2024-11-18T02:33:07,665 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33589 connecting to ZooKeeper ensemble=127.0.0.1:52545 2024-11-18T02:33:07,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335890x0, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:33:07,671 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33589-0x10128ebc7d10000 connected 2024-11-18T02:33:07,687 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:33:07,689 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:33:07,691 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:33:07,691 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803, hbase.cluster.distributed=false 2024-11-18T02:33:07,692 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:33:07,692 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33589 2024-11-18T02:33:07,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33589 2024-11-18T02:33:07,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33589 2024-11-18T02:33:07,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33589 2024-11-18T02:33:07,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33589 2024-11-18T02:33:07,708 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c4730a2bacf8:0 server-side Connection retries=45 2024-11-18T02:33:07,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:33:07,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T02:33:07,708 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T02:33:07,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T02:33:07,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T02:33:07,708 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T02:33:07,708 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T02:33:07,709 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44241 2024-11-18T02:33:07,710 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44241 connecting to ZooKeeper ensemble=127.0.0.1:52545 2024-11-18T02:33:07,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:33:07,712 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:33:07,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:442410x0, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T02:33:07,715 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:442410x0, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:33:07,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44241-0x10128ebc7d10001 connected 2024-11-18T02:33:07,716 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T02:33:07,716 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T02:33:07,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T02:33:07,718 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T02:33:07,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44241 2024-11-18T02:33:07,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44241 2024-11-18T02:33:07,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44241 2024-11-18T02:33:07,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44241 2024-11-18T02:33:07,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44241 2024-11-18T02:33:07,730 
DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c4730a2bacf8:33589 2024-11-18T02:33:07,732 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:07,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:33:07,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:33:07,735 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:07,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T02:33:07,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,737 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T02:33:07,737 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c4730a2bacf8,33589,1731897187662 from backup master directory 2024-11-18T02:33:07,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:07,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:33:07,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T02:33:07,739 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
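The znode activity above (adding a backup-master znode under /hbase/backup-masters, setting a watcher on /hbase/master, then deleting the backup entry) is ZooKeeper-based active-master election. The sketch below illustrates that general pattern with the plain Apache ZooKeeper client rather than HBase's own ZKUtil/ActiveMasterManager code; the znode paths and the helper are illustrative only.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.*;

public class MasterElectionSketch {
  // Illustrative pattern only: try to create an ephemeral "master" znode;
  // if it already exists, register under backup-masters and watch the winner.
  static boolean tryBecomeActive(ZooKeeper zk, String serverName) throws Exception {
    byte[] data = serverName.getBytes(StandardCharsets.UTF_8);
    try {
      zk.create("/hbase/master", data,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      return true;                       // we won: we are the active master
    } catch (KeeperException.NodeExistsException e) {
      zk.create("/hbase/backup-masters/" + serverName, data,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // Watch the active master's znode; a NodeDeleted event (like the
      // ZKWatcher events in the log above) would trigger another attempt.
      zk.exists("/hbase/master", event -> {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
          // re-run tryBecomeActive(...) from the caller's event loop
        }
      });
      return false;
    }
  }
}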
2024-11-18T02:33:07,739 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:07,742 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/hbase.id] with ID: 78b655eb-f3ba-4391-b297-7676d77f4e04 2024-11-18T02:33:07,742 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/.tmp/hbase.id 2024-11-18T02:33:07,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:33:07,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741826_1002 (size=42) 2024-11-18T02:33:07,749 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/.tmp/hbase.id]:[hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/hbase.id] 2024-11-18T02:33:07,759 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:33:07,760 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T02:33:07,761 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
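The cluster ID handling just logged (write hbase.id under .tmp, then move it to its final name) is the usual write-then-rename pattern for publishing a small file on HDFS without readers ever seeing a partial write. Below is a minimal sketch of that pattern with the Hadoop FileSystem API; the directory, file name, and helper method are illustrative, not the FSUtils implementation itself.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  // Illustrative only: write content to <dir>/.tmp/<name>, then rename it to
  // <dir>/<name>; a single-file rename within HDFS is atomic.
  static void writeThenRename(FileSystem fs, Path dir, String name, String content)
      throws java.io.IOException {
    Path tmp = new Path(new Path(dir, ".tmp"), name);
    Path dst = new Path(dir, name);
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename " + tmp + " -> " + dst + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // e.g. publishing a fresh cluster ID, as the master does for hbase.id above
    writeThenRename(fs, new Path("/user/jenkins/test-data/example-root"),
        "hbase.id", java.util.UUID.randomUUID().toString());
  }
}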
2024-11-18T02:33:07,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:33:07,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741827_1003 (size=196) 2024-11-18T02:33:07,769 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T02:33:07,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:33:07,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T02:33:07,770 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T02:33:07,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T02:33:07,770 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:33:07,770 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-18T02:33:07,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:33:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741828_1004 (size=1189) 2024-11-18T02:33:07,776 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store 2024-11-18T02:33:07,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:33:07,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741829_1005 (size=34) 2024-11-18T02:33:07,782 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:33:07,782 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:33:07,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:07,782 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:07,782 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-18T02:33:07,782 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:07,782 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:07,782 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897187782Disabling compacts and flushes for region at 1731897187782Disabling writes for close at 1731897187782Writing region close event to WAL at 1731897187782Closed at 1731897187782 2024-11-18T02:33:07,783 WARN [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/.initializing 2024-11-18T02:33:07,783 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/WALs/c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:07,785 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C33589%2C1731897187662, suffix=, logDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/WALs/c4730a2bacf8,33589,1731897187662, archiveDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/oldWALs, maxLogs=10 2024-11-18T02:33:07,785 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C33589%2C1731897187662.1731897187785 2024-11-18T02:33:07,789 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/WALs/c4730a2bacf8,33589,1731897187662/c4730a2bacf8%2C33589%2C1731897187662.1731897187785 2024-11-18T02:33:07,790 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36763:36763),(127.0.0.1/127.0.0.1:42829:42829)] 2024-11-18T02:33:07,790 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:33:07,791 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:33:07,791 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,791 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,792 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T02:33:07,793 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:07,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T02:33:07,794 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:33:07,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T02:33:07,795 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:33:07,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T02:33:07,797 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T02:33:07,797 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,798 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,798 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,799 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,799 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,799 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T02:33:07,800 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T02:33:07,802 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:33:07,803 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740873, jitterRate=-0.05793219804763794}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T02:33:07,803 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731897187791Initializing all the Stores at 1731897187791Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897187791Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897187792 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897187792Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897187792Cleaning up temporary data from old regions at 1731897187799 (+7 ms)Region opened successfully at 1731897187803 (+4 ms) 2024-11-18T02:33:07,807 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] 
region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T02:33:07,810 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dcdde12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:33:07,811 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T02:33:07,811 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T02:33:07,811 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T02:33:07,811 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T02:33:07,812 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T02:33:07,812 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T02:33:07,812 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T02:33:07,815 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
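The two policy values logged above for master:store follow from simple arithmetic: FlushLargeStoresPolicy falls back to the region memstore flush size divided by the number of column families (134217728 / 4 families = 33554432, i.e. 32 MB), and ConstantSizeRegionSplitPolicy shifts its configured base size by a random jitter rate. A minimal sketch of both calculations; the base split size used below (786432 bytes) is back-computed from the logged numbers and is an assumption, not something the log states.

    // Sketch only: reproduces the arithmetic behind the logged policy values.
    public class PolicyMath {
        // FlushLargeStoresPolicy fallback: lower bound = memstore flush size / number of families.
        static long flushLowerBound(long memstoreFlushSize, int numFamilies) {
            return memstoreFlushSize / numFamilies;
        }

        // ConstantSizeRegionSplitPolicy-style jitter: desired max file size is the
        // configured base size plus base * jitterRate (jitterRate may be negative).
        static long jitteredSplitSize(long baseMaxFileSize, double jitterRate) {
            return baseMaxFileSize + (long) (baseMaxFileSize * jitterRate);
        }

        public static void main(String[] args) {
            // master:store has 4 families (info, proc, rs, state) and flushSize=134217728.
            System.out.println(flushLowerBound(134_217_728L, 4));                       // 33554432, matches the log
            // 786432 is an assumed/back-computed base; the jitterRate is taken from the log.
            System.out.println(jitteredSplitSize(786_432L, -0.05793219804763794));      // 740873, matches the log
        }
    }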
2024-11-18T02:33:07,816 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T02:33:07,818 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T02:33:07,819 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T02:33:07,820 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T02:33:07,821 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T02:33:07,821 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T02:33:07,822 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T02:33:07,823 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T02:33:07,824 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T02:33:07,825 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T02:33:07,827 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T02:33:07,830 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T02:33:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:33:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T02:33:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-18T02:33:07,832 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c4730a2bacf8,33589,1731897187662, sessionid=0x10128ebc7d10000, setting cluster-up flag (Was=false) 2024-11-18T02:33:07,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,841 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T02:33:07,842 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:07,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:07,854 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T02:33:07,855 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:07,856 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T02:33:07,858 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T02:33:07,858 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T02:33:07,858 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
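The ZKUtil entries above treat a missing znode such as /hbase/balancer as an expected condition ("not necessarily an error") rather than a failure. A minimal sketch of that pattern against the plain ZooKeeper client API; the quorum string and session timeout are copied from the log and are otherwise arbitrary.

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class OptionalZNodeRead {
        public static void main(String[] args) throws Exception {
            // Quorum taken from the log; any reachable ZooKeeper ensemble works.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:52545", 30_000, event -> { });
            try {
                byte[] data = zk.getData("/hbase/balancer", false, null);
                int len = (data == null) ? 0 : data.length;
                System.out.println("/hbase/balancer present, " + len + " bytes");
            } catch (KeeperException.NoNodeException e) {
                // Mirrors the log: the node simply does not exist yet, which is not an error.
                System.out.println("/hbase/balancer not present (not necessarily an error)");
            } finally {
                zk.close();
            }
        }
    }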
2024-11-18T02:33:07,858 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c4730a2bacf8,33589,1731897187662 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T02:33:07,859 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:33:07,859 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:33:07,859 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:33:07,859 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=5, maxPoolSize=5 2024-11-18T02:33:07,859 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c4730a2bacf8:0, corePoolSize=10, maxPoolSize=10 2024-11-18T02:33:07,859 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,859 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:33:07,859 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,860 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731897217860 2024-11-18T02:33:07,860 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T02:33:07,860 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T02:33:07,860 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T02:33:07,860 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T02:33:07,860 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T02:33:07,860 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T02:33:07,860 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:33:07,860 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,861 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T02:33:07,861 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T02:33:07,861 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T02:33:07,861 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T02:33:07,861 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T02:33:07,861 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T02:33:07,862 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897187861,5,FailOnTimeoutGroup] 2024-11-18T02:33:07,862 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,862 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897187862,5,FailOnTimeoutGroup] 2024-11-18T02:33:07,862 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
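Each "Starting executor service name=... corePoolSize=..., maxPoolSize=..." entry logged above corresponds to a small named thread pool. A rough sketch of such a pool in plain java.util.concurrent terms; the name and sizes are copied from the MASTER_OPEN_REGION entry (5/5), everything else (queue choice, keep-alive) is illustrative rather than HBase's actual ExecutorService implementation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class NamedPool {
        static ThreadPoolExecutor newPool(String name, int core, int max) {
            AtomicInteger seq = new AtomicInteger();
            return new ThreadPoolExecutor(
                core, max,
                60L, TimeUnit.SECONDS,                 // idle keep-alive, illustrative
                new LinkedBlockingQueue<>(),           // unbounded work queue
                r -> new Thread(r, name + "-" + seq.getAndIncrement()));
        }

        public static void main(String[] args) throws InterruptedException {
            // Sizes taken from the MASTER_OPEN_REGION entry in the log.
            ThreadPoolExecutor pool = newPool("MASTER_OPEN_REGION-master", 5, 5);
            pool.execute(() -> System.out.println(Thread.currentThread().getName() + " running"));
            pool.shutdown();
            pool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }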
2024-11-18T02:33:07,862 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T02:33:07,862 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T02:33:07,862 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,862 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
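The hbase:meta descriptor printed above is the same kind of structure a client builds with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder. A hedged sketch that reconstructs just the 'info' family with the attributes shown in the log (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks); the table name is illustrative and this is not the test's actual code, since hbase:meta itself is created by the master.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed in the descriptor above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build();

            // Illustrative table name, not hbase:meta.
            TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo", "meta_like"))
                .setColumnFamily(info)
                .build();

            System.out.println(table);
        }
    }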
2024-11-18T02:33:07,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:33:07,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741831_1007 (size=1321) 2024-11-18T02:33:07,868 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T02:33:07,868 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803 2024-11-18T02:33:07,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:33:07,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741832_1008 (size=32) 2024-11-18T02:33:07,873 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:33:07,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:33:07,875 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:33:07,876 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:07,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:33:07,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:33:07,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:07,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:33:07,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:33:07,878 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:07,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:33:07,879 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:33:07,879 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:07,880 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:07,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:33:07,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740 2024-11-18T02:33:07,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740 2024-11-18T02:33:07,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:33:07,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:33:07,883 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
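The CompactionConfiguration entries above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0) feed the ratio test used by ratio-based/exploring selection: a candidate file stays in a selection only if it is no larger than the sum of the other candidates multiplied by the ratio. A simplified sketch of that check, not the actual ExploringCompactionPolicy code.

    import java.util.List;

    public class RatioCheck {
        // Simplified "files in ratio" test: every file must be
        // <= (sum of the other files) * ratio for the selection to qualify.
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            double ratio = 1.2;                                              // default ratio from the log
            System.out.println(filesInRatio(List.of(10L, 12L, 11L), ratio)); // similarly sized files -> true
            System.out.println(filesInRatio(List.of(100L, 5L, 6L), ratio));  // one oversized file -> false
        }
    }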
2024-11-18T02:33:07,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:33:07,886 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T02:33:07,886 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813385, jitterRate=0.034273430705070496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:33:07,886 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731897187874Initializing all the Stores at 1731897187874Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897187874Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897187874Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897187874Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897187874Cleaning up temporary data from old regions at 1731897187882 (+8 ms)Region opened successfully at 1731897187886 (+4 ms) 2024-11-18T02:33:07,887 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:33:07,887 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:33:07,887 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:33:07,887 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:33:07,887 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:33:07,887 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:33:07,887 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897187887Disabling compacts and flushes for region at 1731897187887Disabling writes for close at 1731897187887Writing region close event 
to WAL at 1731897187887Closed at 1731897187887 2024-11-18T02:33:07,888 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:33:07,888 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T02:33:07,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T02:33:07,889 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:33:07,890 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T02:33:07,920 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(746): ClusterId : 78b655eb-f3ba-4391-b297-7676d77f4e04 2024-11-18T02:33:07,920 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T02:33:07,924 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T02:33:07,924 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T02:33:07,925 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T02:33:07,926 DEBUG [RS:0;c4730a2bacf8:44241 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13fc59a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c4730a2bacf8/172.17.0.2:0 2024-11-18T02:33:07,938 DEBUG [RS:0;c4730a2bacf8:44241 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c4730a2bacf8:44241 2024-11-18T02:33:07,938 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T02:33:07,938 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T02:33:07,938 DEBUG [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(832): About to register with Master. 
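Every entry in this log follows the same shape: timestamp, level, [thread {}], logger(line): message. A small hedged sketch of a regex that splits one such entry into those fields, which can help when grepping or post-processing a run like this; the pattern is inferred from the lines above, not an official layout specification.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class LogLineParser {
        // timestamp  level  [thread {}]  logger(line): message   (pattern inferred from this log)
        private static final Pattern ENTRY = Pattern.compile(
            "^(\\S+)\\s+(TRACE|DEBUG|INFO|WARN|ERROR)\\s+\\[(.+?) \\{\\}\\]\\s+(\\S+)\\((\\d+)\\):\\s(.*)$");

        public static void main(String[] args) {
            String line = "2024-11-18T02:33:07,888 INFO [PEWorker-1 {}] "
                + "procedure.InitMetaProcedure(108): Going to assign meta";
            Matcher m = ENTRY.matcher(line);
            if (m.matches()) {
                System.out.println("time    = " + m.group(1));
                System.out.println("level   = " + m.group(2));
                System.out.println("thread  = " + m.group(3));
                System.out.println("logger  = " + m.group(4));
                System.out.println("line    = " + m.group(5));
                System.out.println("message = " + m.group(6));
            }
        }
    }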
2024-11-18T02:33:07,938 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(2659): reportForDuty to master=c4730a2bacf8,33589,1731897187662 with port=44241, startcode=1731897187708 2024-11-18T02:33:07,938 DEBUG [RS:0;c4730a2bacf8:44241 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T02:33:07,940 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41767, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T02:33:07,941 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33589 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:07,941 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33589 {}] master.ServerManager(517): Registering regionserver=c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:07,942 DEBUG [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803 2024-11-18T02:33:07,942 DEBUG [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33731 2024-11-18T02:33:07,942 DEBUG [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T02:33:07,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:33:07,944 DEBUG [RS:0;c4730a2bacf8:44241 {}] zookeeper.ZKUtil(111): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:07,944 WARN [RS:0;c4730a2bacf8:44241 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T02:33:07,944 INFO [RS:0;c4730a2bacf8:44241 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:33:07,945 DEBUG [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:07,945 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c4730a2bacf8,44241,1731897187708] 2024-11-18T02:33:07,948 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T02:33:07,949 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T02:33:07,949 INFO [RS:0;c4730a2bacf8:44241 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T02:33:07,949 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
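MemStoreFlusher above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the low mark is 95% of the global limit (880 * 0.95 = 836). A tiny sketch of that relationship; the 0.95 factor corresponds to the default hbase.regionserver.global.memstore.size.lower.limit and is assumed here rather than read from the test's configuration.

    public class MemStoreMarks {
        // Low-water mark expressed as a fraction of the global memstore limit.
        static long lowerMark(long globalLimitBytes, double lowerLimitFraction) {
            return (long) (globalLimitBytes * lowerLimitFraction);
        }

        public static void main(String[] args) {
            long limit = 880L * 1024 * 1024;                  // 880 M, from the log
            long low = lowerMark(limit, 0.95);                // assumed default lower-limit fraction
            System.out.println(low / (1024 * 1024) + " M");   // prints 836 M, matching the log
        }
    }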
2024-11-18T02:33:07,950 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T02:33:07,950 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T02:33:07,950 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,950 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,950 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,950 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,950 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,950 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,950 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c4730a2bacf8:0, corePoolSize=2, maxPoolSize=2 2024-11-18T02:33:07,950 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,950 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,951 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,951 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,951 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,951 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c4730a2bacf8:0, corePoolSize=1, maxPoolSize=1 2024-11-18T02:33:07,951 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:33:07,951 DEBUG [RS:0;c4730a2bacf8:44241 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c4730a2bacf8:0, corePoolSize=3, maxPoolSize=3 2024-11-18T02:33:07,951 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
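The ChoreService entries above schedule periodic housekeeping tasks (CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every minute, and so on). A rough java.util.concurrent equivalent of one such chore, shown only to illustrate the scheduling pattern; it does not use HBase's ScheduledChore/ChoreService classes.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor(
                r -> new Thread(r, "CompactionChecker-sketch"));

            // period=1000, unit=MILLISECONDS, as in the CompactionChecker entry above.
            chores.scheduleAtFixedRate(
                () -> System.out.println("checking stores for compaction work"),
                0, 1000, TimeUnit.MILLISECONDS);

            TimeUnit.SECONDS.sleep(3);   // let a few iterations run, then stop
            chores.shutdownNow();
        }
    }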
2024-11-18T02:33:07,951 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,951 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,951 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,951 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,951 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,44241,1731897187708-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:33:07,965 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T02:33:07,965 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,44241,1731897187708-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,965 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,965 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.Replication(171): c4730a2bacf8,44241,1731897187708 started 2024-11-18T02:33:07,979 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:07,979 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1482): Serving as c4730a2bacf8,44241,1731897187708, RpcServer on c4730a2bacf8/172.17.0.2:44241, sessionid=0x10128ebc7d10001 2024-11-18T02:33:07,979 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T02:33:07,979 DEBUG [RS:0;c4730a2bacf8:44241 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:07,979 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,44241,1731897187708' 2024-11-18T02:33:07,979 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T02:33:07,980 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T02:33:07,980 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T02:33:07,980 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T02:33:07,980 DEBUG [RS:0;c4730a2bacf8:44241 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:07,980 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c4730a2bacf8,44241,1731897187708' 2024-11-18T02:33:07,980 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T02:33:07,980 DEBUG 
[RS:0;c4730a2bacf8:44241 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T02:33:07,981 DEBUG [RS:0;c4730a2bacf8:44241 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T02:33:07,981 INFO [RS:0;c4730a2bacf8:44241 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T02:33:07,981 INFO [RS:0;c4730a2bacf8:44241 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T02:33:08,040 WARN [c4730a2bacf8:33589 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T02:33:08,082 INFO [RS:0;c4730a2bacf8:44241 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C44241%2C1731897187708, suffix=, logDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/c4730a2bacf8,44241,1731897187708, archiveDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/oldWALs, maxLogs=32 2024-11-18T02:33:08,083 INFO [RS:0;c4730a2bacf8:44241 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44241%2C1731897187708.1731897188083 2024-11-18T02:33:08,088 INFO [RS:0;c4730a2bacf8:44241 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/c4730a2bacf8,44241,1731897187708/c4730a2bacf8%2C44241%2C1731897187708.1731897188083 2024-11-18T02:33:08,089 DEBUG [RS:0;c4730a2bacf8:44241 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42829:42829),(127.0.0.1/127.0.0.1:36763:36763)] 2024-11-18T02:33:08,291 DEBUG [c4730a2bacf8:33589 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T02:33:08,291 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:08,292 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,44241,1731897187708, state=OPENING 2024-11-18T02:33:08,294 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T02:33:08,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:08,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:08,298 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T02:33:08,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:33:08,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:33:08,298 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,44241,1731897187708}] 2024-11-18T02:33:08,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,33905,1731897008067/c4730a2bacf8%2C33905%2C1731897008067.1731897008303 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:08,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43001/user/jenkins/test-data/f1e3360f-195b-4212-e766-ea429525cc29/WALs/c4730a2bacf8,42723,1731897006974/c4730a2bacf8%2C42723%2C1731897006974.meta.1731897007934.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T02:33:08,451 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T02:33:08,453 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50509, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T02:33:08,456 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T02:33:08,456 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:33:08,457 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c4730a2bacf8%2C44241%2C1731897187708.meta, suffix=.meta, logDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/c4730a2bacf8,44241,1731897187708, archiveDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/oldWALs, maxLogs=32 2024-11-18T02:33:08,458 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c4730a2bacf8%2C44241%2C1731897187708.meta.1731897188458.meta 2024-11-18T02:33:08,465 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/c4730a2bacf8,44241,1731897187708/c4730a2bacf8%2C44241%2C1731897187708.meta.1731897188458.meta 2024-11-18T02:33:08,469 DEBUG 
[RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36763:36763),(127.0.0.1/127.0.0.1:42829:42829)] 2024-11-18T02:33:08,474 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T02:33:08,474 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T02:33:08,474 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T02:33:08,474 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-18T02:33:08,475 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T02:33:08,475 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T02:33:08,475 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T02:33:08,475 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T02:33:08,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T02:33:08,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T02:33:08,477 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:08,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:08,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T02:33:08,477 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T02:33:08,478 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:08,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:08,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T02:33:08,478 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T02:33:08,479 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:08,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:08,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T02:33:08,480 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T02:33:08,480 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T02:33:08,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T02:33:08,480 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T02:33:08,481 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740 2024-11-18T02:33:08,482 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740 2024-11-18T02:33:08,483 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T02:33:08,483 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T02:33:08,483 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
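The last entry above notes that FlushLargeStoresPolicy falls back to "memstore flush heap size divided by the number of families" because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor. As a hedged illustration only (the table name and the 16 MB value are arbitrary; just the property key is taken verbatim from the log line), a descriptor carrying that table-level value could be built like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundExample {
      public static void main(String[] args) {
        // Build a descriptor for a hypothetical table "t1" with one family "info".
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Table-level override: with a per-column-family flush policy in effect,
            // only flush families whose memstore exceeds ~16 MB (example value).
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(td);
      }
    }

Passing such a descriptor to Admin.createTable or modifyTable would make the policy use the explicit bound instead of the computed default shown in the log.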
2024-11-18T02:33:08,484 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T02:33:08,485 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769393, jitterRate=-0.021667152643203735}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T02:33:08,485 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T02:33:08,485 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731897188475Writing region info on filesystem at 1731897188475Initializing all the Stores at 1731897188475Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897188475Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897188476 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731897188476Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731897188476Cleaning up temporary data from old regions at 1731897188483 (+7 ms)Running coprocessor post-open hooks at 1731897188485 (+2 ms)Region opened successfully at 1731897188485 2024-11-18T02:33:08,486 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731897188450 2024-11-18T02:33:08,488 DEBUG [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T02:33:08,488 INFO [RS_OPEN_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T02:33:08,489 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:08,490 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c4730a2bacf8,44241,1731897187708, state=OPEN 2024-11-18T02:33:08,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:33:08,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T02:33:08,497 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:08,497 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:33:08,497 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T02:33:08,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T02:33:08,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c4730a2bacf8,44241,1731897187708 in 199 msec 2024-11-18T02:33:08,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T02:33:08,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-18T02:33:08,501 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T02:33:08,501 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T02:33:08,502 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:33:08,502 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,44241,1731897187708, seqNum=-1] 2024-11-18T02:33:08,503 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:33:08,504 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40145, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:33:08,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 650 msec 2024-11-18T02:33:08,508 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731897188508, completionTime=-1 2024-11-18T02:33:08,508 INFO 
[master/c4730a2bacf8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T02:33:08,508 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731897248510 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731897308510 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33589,1731897187662-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33589,1731897187662-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33589,1731897187662-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c4730a2bacf8:33589, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:08,510 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:08,512 DEBUG [master/c4730a2bacf8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T02:33:08,514 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.775sec 2024-11-18T02:33:08,514 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T02:33:08,514 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T02:33:08,514 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T02:33:08,514 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
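Most of the "period=..., unit=... is enabled." entries above (BalancerChore, RegionNormalizerChore, CatalogJanitor, and earlier MemstoreFlusherChore and friends) come from ScheduledChore instances registered with a ChoreService. A minimal sketch of that pattern, assuming the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore as they appear in recent HBase versions:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreExample {
      public static void main(String[] args) throws InterruptedException {
        // Simple Stoppable the chore consults to know when to stop running.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Runs every 1000 ms, mirroring entries such as "name=MemstoreFlusherChore, period=1000".
        ScheduledChore chore = new ScheduledChore("demoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(chore);  // scheduling is what emits the "... is enabled." lines above
        Thread.sleep(3000);
        service.shutdown();
      }
    }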
2024-11-18T02:33:08,514 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T02:33:08,514 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33589,1731897187662-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T02:33:08,514 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33589,1731897187662-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T02:33:08,516 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T02:33:08,516 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T02:33:08,516 INFO [master/c4730a2bacf8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c4730a2bacf8,33589,1731897187662-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T02:33:08,520 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62b479b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:33:08,520 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c4730a2bacf8,33589,-1 for getting cluster id 2024-11-18T02:33:08,521 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T02:33:08,522 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '78b655eb-f3ba-4391-b297-7676d77f4e04' 2024-11-18T02:33:08,522 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T02:33:08,522 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "78b655eb-f3ba-4391-b297-7676d77f4e04" 2024-11-18T02:33:08,522 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ff46454, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:33:08,523 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c4730a2bacf8,33589,-1] 2024-11-18T02:33:08,523 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T02:33:08,523 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:08,524 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54170, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T02:33:08,524 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@335e1da0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T02:33:08,525 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T02:33:08,525 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c4730a2bacf8,44241,1731897187708, seqNum=-1] 2024-11-18T02:33:08,526 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T02:33:08,526 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50782, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T02:33:08,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:08,528 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T02:33:08,530 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T02:33:08,530 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T02:33:08,532 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/test.com,8080,1, archiveDir=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/oldWALs, maxLogs=32 2024-11-18T02:33:08,532 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731897188532 2024-11-18T02:33:08,537 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/test.com,8080,1/test.com%2C8080%2C1.1731897188532 2024-11-18T02:33:08,538 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36763:36763),(127.0.0.1/127.0.0.1:42829:42829)] 2024-11-18T02:33:08,538 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731897188538 2024-11-18T02:33:08,542 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,542 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,543 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,543 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,543 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,543 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/test.com,8080,1/test.com%2C8080%2C1.1731897188532 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/test.com,8080,1/test.com%2C8080%2C1.1731897188538 2024-11-18T02:33:08,544 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36763:36763),(127.0.0.1/127.0.0.1:42829:42829)] 2024-11-18T02:33:08,544 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/test.com,8080,1/test.com%2C8080%2C1.1731897188532 is not closed yet, will try archiving it next time 2024-11-18T02:33:08,544 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,544 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741835_1011 (size=93) 2024-11-18T02:33:08,544 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,544 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,545 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741835_1011 (size=93) 2024-11-18T02:33:08,547 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/WALs/test.com,8080,1/test.com%2C8080%2C1.1731897188532 to hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/oldWALs/test.com%2C8080%2C1.1731897188532 2024-11-18T02:33:08,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741836_1012 (size=93) 2024-11-18T02:33:08,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741836_1012 (size=93) 2024-11-18T02:33:08,550 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/oldWALs 2024-11-18T02:33:08,550 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731897188538) 2024-11-18T02:33:08,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T02:33:08,550 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
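The roll recorded above ("Rolled WAL ... with entries=0, filesize=85 B; new WAL ...", then archiving to oldWALs) is driven by the WAL sizing reported earlier as "blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32". A hedged sketch of the configuration keys that commonly feed those numbers (key names are the usually documented ones and should be checked against the running version; the values simply mirror the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; the log reports blocksize=256 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll when a WAL reaches blocksize * multiplier (256 MB * 0.5 = 128 MB in the log).
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on open WAL files before flushes are forced; maxLogs=32 in the log.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println(conf.get("hbase.regionserver.hlog.blocksize"));
      }
    }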
2024-11-18T02:33:08,550 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:33:08,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:08,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:08,550 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
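The call stack above shows the connection being closed from AbstractTestLogRolling.tearDown via HBaseTestingUtil.shutdownMiniCluster. In outline, a test built on that utility follows the shape below; this is only a sketch, with the class and method names taken from the stack trace and the actual WAL-rolling test body omitted:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Spins up an in-process ZooKeeper, HDFS and HBase cluster, as seen earlier in the log.
        util.startMiniCluster();
      }

      @Test
      public void testSomething() throws Exception {
        // Exercise WAL rolling, flushes, etc. against the mini cluster here.
      }

      @After
      public void tearDown() throws Exception {
        // Produces the "Shutting down minicluster" / connection-close entries above.
        util.shutdownMiniCluster();
      }
    }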
2024-11-18T02:33:08,550 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T02:33:08,550 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=236212020, stopped=false 2024-11-18T02:33:08,550 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c4730a2bacf8,33589,1731897187662 2024-11-18T02:33:08,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:33:08,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T02:33:08,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:08,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:08,552 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:33:08,552 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T02:33:08,552 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:33:08,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:08,552 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c4730a2bacf8,44241,1731897187708' ***** 2024-11-18T02:33:08,553 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T02:33:08,553 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T02:33:08,553 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T02:33:08,553 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(959): stopping server c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c4730a2bacf8:44241. 
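Shutdown is signalled through ZooKeeper: the entries above show NodeDeleted events for /hbase/running arriving at both the master and region server watchers, after which each re-sets a watch on the now-missing znode. A bare-bones watcher for that znode using the plain ZooKeeper client API (the quorum address is a placeholder; the mini cluster in this log used 127.0.0.1:52545):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZnodeWatcher {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> { });
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            // Mirrors the "Received ZooKeeper Event, type=NodeDeleted ... path=/hbase/running"
            // entries above: deletion of this znode means cluster shutdown was requested.
            System.out.println("cluster shutdown requested");
          }
        };
        // exists() both checks the znode and arms the watch, much as the ZKWatcher lines suggest.
        zk.exists("/hbase/running", watcher);
        Thread.sleep(60000);
        zk.close();
      }
    }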
2024-11-18T02:33:08,553 DEBUG [RS:0;c4730a2bacf8:44241 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T02:33:08,553 DEBUG [RS:0;c4730a2bacf8:44241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
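The stack above is the region server closing its internal AsyncConnection; client code goes through the same AsyncConnectionImpl.close() path when it releases an async connection. A small, hedged usage sketch with the public client API (the table and row are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AsyncConnectionCloseExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // try-with-resources ends in AsyncConnectionImpl.close(), which emits the
        // "Connection has been closed by ..." entries seen in the log.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          conn.getTable(TableName.valueOf("t1"))            // hypothetical table
              .get(new Get(Bytes.toBytes("row1")))
              .thenAccept(result -> System.out.println(result.isEmpty()))
              .join();
        }
      }
    }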
2024-11-18T02:33:08,553 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T02:33:08,554 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T02:33:08,554 DEBUG [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T02:33:08,554 DEBUG [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T02:33:08,554 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T02:33:08,554 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T02:33:08,554 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T02:33:08,554 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T02:33:08,554 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T02:33:08,554 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T02:33:08,570 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740/.tmp/ns/91c22c5948844ddf822a87047eaaca4b is 43, key is default/ns:d/1731897188504/Put/seqid=0 2024-11-18T02:33:08,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741837_1013 (size=5153) 2024-11-18T02:33:08,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741837_1013 (size=5153) 2024-11-18T02:33:08,574 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740/.tmp/ns/91c22c5948844ddf822a87047eaaca4b 2024-11-18T02:33:08,579 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740/.tmp/ns/91c22c5948844ddf822a87047eaaca4b as hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740/ns/91c22c5948844ddf822a87047eaaca4b 2024-11-18T02:33:08,582 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740/ns/91c22c5948844ddf822a87047eaaca4b, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T02:33:08,583 INFO 
[RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-11-18T02:33:08,584 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T02:33:08,587 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T02:33:08,588 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T02:33:08,588 INFO [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T02:33:08,588 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731897188554Running coprocessor pre-close hooks at 1731897188554Disabling compacts and flushes for region at 1731897188554Disabling writes for close at 1731897188554Obtaining lock to block concurrent updates at 1731897188554Preparing flush snapshotting stores in 1588230740 at 1731897188554Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731897188554Flushing stores of hbase:meta,,1.1588230740 at 1731897188555 (+1 ms)Flushing 1588230740/ns: creating writer at 1731897188555Flushing 1588230740/ns: appending metadata at 1731897188569 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731897188569Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a1f2182: reopening flushed file at 1731897188578 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1731897188583 (+5 ms)Writing region close event to WAL at 1731897188585 (+2 ms)Running coprocessor post-close hooks at 1731897188588 (+3 ms)Closed at 1731897188588 2024-11-18T02:33:08,588 DEBUG [RS_CLOSE_META-regionserver/c4730a2bacf8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T02:33:08,754 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(976): stopping server c4730a2bacf8,44241,1731897187708; all regions closed. 
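Closing the meta region forces the ~74 B memstore flush recorded above (two cells in the ns family written as a 5.0 K HFile at sequenceid=6) before the region close journal is written. The same flush can be requested explicitly through the Admin API; a brief sketch using the standard Admin.flush call:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ManualFlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush hbase:meta on demand; the close path in the log performs the
          // equivalent flush internally before closing the region.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }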
2024-11-18T02:33:08,755 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,755 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,755 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,755 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,755 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741834_1010 (size=1152) 2024-11-18T02:33:08,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741834_1010 (size=1152) 2024-11-18T02:33:08,759 DEBUG [RS:0;c4730a2bacf8:44241 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/oldWALs 2024-11-18T02:33:08,759 INFO [RS:0;c4730a2bacf8:44241 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C44241%2C1731897187708.meta:.meta(num 1731897188458) 2024-11-18T02:33:08,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,760 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,760 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741833_1009 (size=93) 2024-11-18T02:33:08,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741833_1009 (size=93) 2024-11-18T02:33:08,763 DEBUG [RS:0;c4730a2bacf8:44241 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/oldWALs 2024-11-18T02:33:08,763 INFO [RS:0;c4730a2bacf8:44241 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c4730a2bacf8%2C44241%2C1731897187708:(num 1731897188083) 2024-11-18T02:33:08,763 DEBUG [RS:0;c4730a2bacf8:44241 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T02:33:08,763 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T02:33:08,763 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:33:08,763 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.ChoreService(370): Chore service for: regionserver/c4730a2bacf8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T02:33:08,763 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:33:08,764 INFO [regionserver/c4730a2bacf8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T02:33:08,764 INFO [RS:0;c4730a2bacf8:44241 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44241 2024-11-18T02:33:08,767 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:33:08,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T02:33:08,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c4730a2bacf8,44241,1731897187708 2024-11-18T02:33:08,768 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c4730a2bacf8,44241,1731897187708] 2024-11-18T02:33:08,770 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c4730a2bacf8,44241,1731897187708 already deleted, retry=false 2024-11-18T02:33:08,770 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c4730a2bacf8,44241,1731897187708 expired; onlineServers=0 2024-11-18T02:33:08,770 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c4730a2bacf8,33589,1731897187662' ***** 2024-11-18T02:33:08,770 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T02:33:08,770 INFO [M:0;c4730a2bacf8:33589 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T02:33:08,770 INFO [M:0;c4730a2bacf8:33589 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T02:33:08,770 DEBUG [M:0;c4730a2bacf8:33589 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T02:33:08,770 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T02:33:08,770 DEBUG [M:0;c4730a2bacf8:33589 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T02:33:08,770 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897187862 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.small.0-1731897187862,5,FailOnTimeoutGroup] 2024-11-18T02:33:08,770 DEBUG [master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897187861 {}] cleaner.HFileCleaner(306): Exit Thread[master/c4730a2bacf8:0:becomeActiveMaster-HFileCleaner.large.0-1731897187861,5,FailOnTimeoutGroup] 2024-11-18T02:33:08,770 INFO [M:0;c4730a2bacf8:33589 {}] hbase.ChoreService(370): Chore service for: master/c4730a2bacf8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T02:33:08,770 INFO [M:0;c4730a2bacf8:33589 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T02:33:08,770 DEBUG [M:0;c4730a2bacf8:33589 {}] master.HMaster(1795): Stopping service threads 2024-11-18T02:33:08,770 INFO [M:0;c4730a2bacf8:33589 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T02:33:08,770 INFO [M:0;c4730a2bacf8:33589 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T02:33:08,770 INFO [M:0;c4730a2bacf8:33589 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T02:33:08,770 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T02:33:08,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T02:33:08,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T02:33:08,771 DEBUG [M:0;c4730a2bacf8:33589 {}] zookeeper.ZKUtil(347): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T02:33:08,771 WARN [M:0;c4730a2bacf8:33589 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T02:33:08,772 INFO [M:0;c4730a2bacf8:33589 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/.lastflushedseqids 2024-11-18T02:33:08,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741838_1014 (size=108) 2024-11-18T02:33:08,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741838_1014 (size=108) 2024-11-18T02:33:08,777 INFO [M:0;c4730a2bacf8:33589 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T02:33:08,777 INFO [M:0;c4730a2bacf8:33589 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T02:33:08,777 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T02:33:08,777 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:08,777 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:08,777 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T02:33:08,777 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T02:33:08,777 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T02:33:08,792 DEBUG [M:0;c4730a2bacf8:33589 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98d27786890f46e38b3a080e430b29d1 is 82, key is hbase:meta,,1/info:regioninfo/1731897188489/Put/seqid=0 2024-11-18T02:33:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741839_1015 (size=5672) 2024-11-18T02:33:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741839_1015 (size=5672) 2024-11-18T02:33:08,797 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98d27786890f46e38b3a080e430b29d1 2024-11-18T02:33:08,814 DEBUG [M:0;c4730a2bacf8:33589 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1a11d19e5f67461eb26f8474479ed8cb is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731897188507/Put/seqid=0 2024-11-18T02:33:08,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741840_1016 (size=5275) 2024-11-18T02:33:08,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741840_1016 (size=5275) 2024-11-18T02:33:08,819 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1a11d19e5f67461eb26f8474479ed8cb 2024-11-18T02:33:08,836 DEBUG [M:0;c4730a2bacf8:33589 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d609fbfe345418f8e325f069a305a94 is 69, key is c4730a2bacf8,44241,1731897187708/rs:state/1731897187941/Put/seqid=0 2024-11-18T02:33:08,840 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741841_1017 (size=5156) 2024-11-18T02:33:08,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741841_1017 (size=5156) 2024-11-18T02:33:08,840 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d609fbfe345418f8e325f069a305a94 2024-11-18T02:33:08,857 DEBUG [M:0;c4730a2bacf8:33589 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c67d0139d641419eabbf2af980bb7ea9 is 52, key is load_balancer_on/state:d/1731897188529/Put/seqid=0 2024-11-18T02:33:08,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741842_1018 (size=5056) 2024-11-18T02:33:08,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741842_1018 (size=5056) 2024-11-18T02:33:08,862 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c67d0139d641419eabbf2af980bb7ea9 2024-11-18T02:33:08,866 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98d27786890f46e38b3a080e430b29d1 as hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98d27786890f46e38b3a080e430b29d1 2024-11-18T02:33:08,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:33:08,869 INFO [RS:0;c4730a2bacf8:44241 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:33:08,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44241-0x10128ebc7d10001, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:33:08,869 INFO [RS:0;c4730a2bacf8:44241 {}] regionserver.HRegionServer(1031): Exiting; stopping=c4730a2bacf8,44241,1731897187708; zookeeper connection closed. 
2024-11-18T02:33:08,869 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@27b68a36 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@27b68a36 2024-11-18T02:33:08,869 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T02:33:08,870 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98d27786890f46e38b3a080e430b29d1, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T02:33:08,871 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1a11d19e5f67461eb26f8474479ed8cb as hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1a11d19e5f67461eb26f8474479ed8cb 2024-11-18T02:33:08,874 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1a11d19e5f67461eb26f8474479ed8cb, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T02:33:08,875 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d609fbfe345418f8e325f069a305a94 as hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d609fbfe345418f8e325f069a305a94 2024-11-18T02:33:08,878 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d609fbfe345418f8e325f069a305a94, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T02:33:08,879 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c67d0139d641419eabbf2af980bb7ea9 as hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c67d0139d641419eabbf2af980bb7ea9 2024-11-18T02:33:08,883 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33731/user/jenkins/test-data/7f39bc1a-c0b0-023f-96e0-be6da2e38803/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c67d0139d641419eabbf2af980bb7ea9, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T02:33:08,883 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false 2024-11-18T02:33:08,885 INFO [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T02:33:08,885 DEBUG [M:0;c4730a2bacf8:33589 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731897188777Disabling compacts and flushes for region at 1731897188777Disabling writes for close at 1731897188777Obtaining lock to block concurrent updates at 1731897188777Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731897188777Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731897188778 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731897188778Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731897188778Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731897188792 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731897188792Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731897188800 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731897188814 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731897188814Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731897188822 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731897188836 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731897188836Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731897188844 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731897188857 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731897188857Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c6c7777: reopening flushed file at 1731897188866 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11012aa3: reopening flushed file at 1731897188870 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1edb98ba: reopening flushed file at 1731897188874 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79193fd8: reopening flushed file at 1731897188878 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false at 1731897188883 (+5 ms)Writing region close event to WAL at 1731897188885 (+2 ms)Closed at 1731897188885 2024-11-18T02:33:08,885 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,885 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,886 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,886 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,886 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T02:33:08,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44889 is added to blk_1073741830_1006 (size=10311) 2024-11-18T02:33:08,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34605 is added to blk_1073741830_1006 (size=10311) 2024-11-18T02:33:08,888 INFO [M:0;c4730a2bacf8:33589 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-18T02:33:08,888 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T02:33:08,888 INFO [M:0;c4730a2bacf8:33589 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33589 2024-11-18T02:33:08,888 INFO [M:0;c4730a2bacf8:33589 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T02:33:08,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:33:08,992 INFO [M:0;c4730a2bacf8:33589 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T02:33:08,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33589-0x10128ebc7d10000, quorum=127.0.0.1:52545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T02:33:08,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23e03366{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:33:08,995 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fb7753b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:33:08,995 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:33:08,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bb82bb7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:33:08,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a1af98b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/hadoop.log.dir/,STOPPED} 2024-11-18T02:33:08,996 WARN [BP-1855444151-172.17.0.2-1731897187004 heartbeating to localhost/127.0.0.1:33731 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:33:08,996 WARN [BP-1855444151-172.17.0.2-1731897187004 heartbeating to localhost/127.0.0.1:33731 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1855444151-172.17.0.2-1731897187004 (Datanode Uuid f037842b-4864-4b36-98b0-2730ca785b0e) service to localhost/127.0.0.1:33731 2024-11-18T02:33:08,996 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:33:08,996 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:33:08,997 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/data/data3/current/BP-1855444151-172.17.0.2-1731897187004 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:33:08,997 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/data/data4/current/BP-1855444151-172.17.0.2-1731897187004 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:33:08,997 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:33:08,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17951be7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T02:33:08,999 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e818af2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:33:08,999 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:33:08,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60fdf071{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:33:08,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f9090c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/hadoop.log.dir/,STOPPED} 2024-11-18T02:33:09,000 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T02:33:09,000 WARN [BP-1855444151-172.17.0.2-1731897187004 heartbeating to localhost/127.0.0.1:33731 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T02:33:09,000 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T02:33:09,000 WARN [BP-1855444151-172.17.0.2-1731897187004 heartbeating to localhost/127.0.0.1:33731 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1855444151-172.17.0.2-1731897187004 (Datanode Uuid 571c92b7-60b0-43b7-899e-d9df4e767393) service to localhost/127.0.0.1:33731 2024-11-18T02:33:09,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/data/data1/current/BP-1855444151-172.17.0.2-1731897187004 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:33:09,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/cluster_38c52495-be93-33fb-ee36-1b15486b54f2/data/data2/current/BP-1855444151-172.17.0.2-1731897187004 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T02:33:09,001 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T02:33:09,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c0b3275{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T02:33:09,007 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d7cc0b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T02:33:09,007 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T02:33:09,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ac034ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T02:33:09,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e290644{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7b79e912-5dfc-229a-ad4d-2af9bea5e707/hadoop.log.dir/,STOPPED} 2024-11-18T02:33:09,013 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T02:33:09,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T02:33:09,036 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=272 (was 233) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33731 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33731 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33731 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:33731 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33731 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33731 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33731 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33731 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=42 (was 42), ProcessCount=11 (was 11), AvailableMemoryMB=2801 (was 2805)