2024-11-12 14:32:31,873 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-12 14:32:31,885 main DEBUG Took 0.009829 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-12 14:32:31,885 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-12 14:32:31,886 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-12 14:32:31,887 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-12 14:32:31,888 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,903 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-12 14:32:31,916 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,917 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,918 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,918 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,919 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,919 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,920 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,920 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,921 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,921 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,922 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,922 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,923 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,923 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-12 14:32:31,924 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,924 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,925 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,925 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,925 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,926 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,926 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,927 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,927 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,927 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-12 14:32:31,928 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,928 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-12 14:32:31,929 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-12 14:32:31,931 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-12 14:32:31,933 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-12 14:32:31,933 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-12 14:32:31,934 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-12 14:32:31,935 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-12 14:32:31,942 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-12 14:32:31,945 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-12 14:32:31,947 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-12 14:32:31,947 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-12 14:32:31,948 main DEBUG createAppenders(={Console}) 2024-11-12 14:32:31,948 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-12 14:32:31,949 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-12 14:32:31,949 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-12 14:32:31,949 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-12 14:32:31,950 main DEBUG OutputStream closed 2024-11-12 14:32:31,950 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-12 14:32:31,950 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-12 14:32:31,950 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-12 14:32:32,015 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-12 14:32:32,017 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-12 14:32:32,018 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-12 14:32:32,019 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-12 14:32:32,019 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-12 14:32:32,020 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-12 14:32:32,020 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-12 14:32:32,020 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-12 14:32:32,020 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-12 14:32:32,021 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-12 14:32:32,021 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-12 14:32:32,021 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-12 14:32:32,022 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-12 14:32:32,022 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-12 14:32:32,022 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-12 14:32:32,022 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-12 14:32:32,023 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-12 14:32:32,023 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-12 14:32:32,025 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-12 14:32:32,026 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-12 14:32:32,026 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-12 14:32:32,027 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-12T14:32:32,039 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-12 14:32:32,042 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-12 14:32:32,042 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-12T14:32:32,279 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef 2024-11-12T14:32:32,304 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df, deleteOnExit=true 2024-11-12T14:32:32,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/test.cache.data in system properties and HBase conf 2024-11-12T14:32:32,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T14:32:32,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir in system properties and HBase conf 2024-11-12T14:32:32,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T14:32:32,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T14:32:32,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T14:32:32,402 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-12T14:32:32,491 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-12T14:32:32,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T14:32:32,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T14:32:32,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T14:32:32,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T14:32:32,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T14:32:32,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T14:32:32,499 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T14:32:32,499 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T14:32:32,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T14:32:32,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/nfs.dump.dir in system properties and HBase conf 2024-11-12T14:32:32,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/java.io.tmpdir in system properties and HBase conf 2024-11-12T14:32:32,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T14:32:32,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T14:32:32,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T14:32:33,628 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-12T14:32:33,702 INFO [Time-limited test {}] log.Log(170): Logging initialized @2461ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-12T14:32:33,776 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:33,838 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T14:32:33,859 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T14:32:33,860 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T14:32:33,861 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T14:32:33,873 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:33,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir/,AVAILABLE} 2024-11-12T14:32:33,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T14:32:34,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/java.io.tmpdir/jetty-localhost-42273-hadoop-hdfs-3_4_1-tests_jar-_-any-6162693084381748425/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T14:32:34,052 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:42273} 2024-11-12T14:32:34,052 INFO [Time-limited test {}] server.Server(415): Started @2812ms 2024-11-12T14:32:34,621 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:34,629 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T14:32:34,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T14:32:34,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T14:32:34,630 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T14:32:34,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir/,AVAILABLE} 2024-11-12T14:32:34,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T14:32:34,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/java.io.tmpdir/jetty-localhost-41861-hadoop-hdfs-3_4_1-tests_jar-_-any-7568886470414565756/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:34,731 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:41861} 2024-11-12T14:32:34,731 INFO [Time-limited test {}] server.Server(415): Started @3491ms 2024-11-12T14:32:34,778 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T14:32:34,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:34,889 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T14:32:34,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T14:32:34,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T14:32:34,893 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T14:32:34,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir/,AVAILABLE} 2024-11-12T14:32:34,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T14:32:35,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/java.io.tmpdir/jetty-localhost-34195-hadoop-hdfs-3_4_1-tests_jar-_-any-16637970109596166506/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:35,018 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:34195} 2024-11-12T14:32:35,018 INFO [Time-limited test {}] server.Server(415): Started @3778ms 2024-11-12T14:32:35,021 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T14:32:35,055 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:35,060 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T14:32:35,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T14:32:35,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T14:32:35,062 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T14:32:35,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir/,AVAILABLE} 2024-11-12T14:32:35,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T14:32:35,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/java.io.tmpdir/jetty-localhost-44137-hadoop-hdfs-3_4_1-tests_jar-_-any-7393017476426147090/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:35,162 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:44137} 2024-11-12T14:32:35,163 INFO [Time-limited test {}] server.Server(415): Started @3922ms 2024-11-12T14:32:35,164 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T14:32:36,833 WARN [Thread-120 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data1/current/BP-1644289805-172.17.0.3-1731421953022/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:36,833 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data2/current/BP-1644289805-172.17.0.3-1731421953022/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:36,866 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T14:32:36,893 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data5/current/BP-1644289805-172.17.0.3-1731421953022/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:36,894 WARN [Thread-135 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data3/current/BP-1644289805-172.17.0.3-1731421953022/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:36,894 WARN [Thread-134 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data6/current/BP-1644289805-172.17.0.3-1731421953022/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:36,894 WARN [Thread-136 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data4/current/BP-1644289805-172.17.0.3-1731421953022/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:36,914 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-12T14:32:36,916 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa99bafb705c0bb1 with lease ID 0x8a71db518a52eb9f: Processing first storage report for DS-aa932ba3-a8b2-4490-8191-b38d3142db20 from datanode DatanodeRegistration(127.0.0.1:45343, datanodeUuid=34f4acec-26f1-426b-af11-e85dcfae0493, infoPort=34333, infoSecurePort=0, ipcPort=45241, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022) 2024-11-12T14:32:36,917 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa99bafb705c0bb1 with lease ID 0x8a71db518a52eb9f: from storage DS-aa932ba3-a8b2-4490-8191-b38d3142db20 node DatanodeRegistration(127.0.0.1:45343, datanodeUuid=34f4acec-26f1-426b-af11-e85dcfae0493, infoPort=34333, infoSecurePort=0, ipcPort=45241, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-12T14:32:36,918 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T14:32:36,918 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa99bafb705c0bb1 with lease ID 0x8a71db518a52eb9f: Processing first storage report for DS-ba20be91-ca1d-4af2-a64b-72bf288a87ef from datanode DatanodeRegistration(127.0.0.1:45343, datanodeUuid=34f4acec-26f1-426b-af11-e85dcfae0493, infoPort=34333, infoSecurePort=0, ipcPort=45241, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022) 2024-11-12T14:32:36,918 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa99bafb705c0bb1 with lease ID 0x8a71db518a52eb9f: from storage DS-ba20be91-ca1d-4af2-a64b-72bf288a87ef node DatanodeRegistration(127.0.0.1:45343, datanodeUuid=34f4acec-26f1-426b-af11-e85dcfae0493, infoPort=34333, infoSecurePort=0, ipcPort=45241, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-12T14:32:36,921 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd643e21d0a0ca397 with lease ID 0x8a71db518a52eba0: Processing first storage report for DS-173cc6dd-8f7d-4e6a-9dff-ed5a70f45d17 from datanode DatanodeRegistration(127.0.0.1:39633, datanodeUuid=13a1360e-497b-4c69-900e-f4c0265385c2, infoPort=45153, infoSecurePort=0, ipcPort=32779, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022) 2024-11-12T14:32:36,921 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd643e21d0a0ca397 with lease ID 0x8a71db518a52eba0: from storage DS-173cc6dd-8f7d-4e6a-9dff-ed5a70f45d17 node DatanodeRegistration(127.0.0.1:39633, datanodeUuid=13a1360e-497b-4c69-900e-f4c0265385c2, infoPort=45153, infoSecurePort=0, ipcPort=32779, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:36,922 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd643e21d0a0ca397 with lease ID 0x8a71db518a52eba0: Processing first storage report for DS-ecc3c28a-5258-47ab-aa07-8cd8f4c2d26e from datanode DatanodeRegistration(127.0.0.1:39633, datanodeUuid=13a1360e-497b-4c69-900e-f4c0265385c2, infoPort=45153, infoSecurePort=0, ipcPort=32779, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022) 2024-11-12T14:32:36,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd643e21d0a0ca397 with lease ID 0x8a71db518a52eba0: from storage DS-ecc3c28a-5258-47ab-aa07-8cd8f4c2d26e node DatanodeRegistration(127.0.0.1:39633, datanodeUuid=13a1360e-497b-4c69-900e-f4c0265385c2, infoPort=45153, infoSecurePort=0, ipcPort=32779, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:36,922 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfebb0677cc4d3ae3 with lease ID 0x8a71db518a52eba1: Processing first storage report for DS-c8cc485f-7cfc-4031-a115-8cf1c95fcfce from datanode DatanodeRegistration(127.0.0.1:34979, datanodeUuid=fdf1fafe-0af1-45e8-8b36-cbdef7e8d07e, infoPort=37193, infoSecurePort=0, ipcPort=42505, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022) 2024-11-12T14:32:36,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xfebb0677cc4d3ae3 with lease ID 0x8a71db518a52eba1: from storage DS-c8cc485f-7cfc-4031-a115-8cf1c95fcfce node DatanodeRegistration(127.0.0.1:34979, datanodeUuid=fdf1fafe-0af1-45e8-8b36-cbdef7e8d07e, infoPort=37193, infoSecurePort=0, ipcPort=42505, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:36,922 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfebb0677cc4d3ae3 with lease ID 0x8a71db518a52eba1: Processing first storage report for DS-79990a9d-baf3-42c5-a195-77ebe5e0a56a from datanode DatanodeRegistration(127.0.0.1:34979, datanodeUuid=fdf1fafe-0af1-45e8-8b36-cbdef7e8d07e, infoPort=37193, infoSecurePort=0, ipcPort=42505, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022) 2024-11-12T14:32:36,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfebb0677cc4d3ae3 with lease ID 0x8a71db518a52eba1: from storage DS-79990a9d-baf3-42c5-a195-77ebe5e0a56a node DatanodeRegistration(127.0.0.1:34979, datanodeUuid=fdf1fafe-0af1-45e8-8b36-cbdef7e8d07e, infoPort=37193, infoSecurePort=0, ipcPort=42505, storageInfo=lv=-57;cid=testClusterID;nsid=592991430;c=1731421953022), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:36,983 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef 2024-11-12T14:32:37,053 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-11-12T14:32:37,110 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=162, ProcessCount=11, AvailableMemoryMB=7724 2024-11-12T14:32:37,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T14:32:37,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-12T14:32:37,365 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/zookeeper_0, clientPort=56395, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T14:32:37,374 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56395 
2024-11-12T14:32:37,397 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:37,400 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:37,490 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:37,490 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:37,536 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:54464 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:39633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54464 dst: /127.0.0.1:39633 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:37,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-12T14:32:37,956 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:37,970 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0 with version=8 2024-11-12T14:32:37,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/hbase-staging 2024-11-12T14:32:38,049 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-12T14:32:38,284 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2b6d221c5cde:0 server-side Connection retries=45 2024-11-12T14:32:38,292 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,297 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T14:32:38,297 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,297 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T14:32:38,427 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T14:32:38,481 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-12T14:32:38,489 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-12T14:32:38,492 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T14:32:38,515 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 92702 (auto-detected) 2024-11-12T14:32:38,516 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-12T14:32:38,531 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33149 2024-11-12T14:32:38,549 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33149 connecting to ZooKeeper ensemble=127.0.0.1:56395 2024-11-12T14:32:38,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331490x0, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T14:32:38,692 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33149-0x1012f7668900000 connected 2024-11-12T14:32:38,781 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:38,785 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:38,796 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:38,799 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0, hbase.cluster.distributed=false 2024-11-12T14:32:38,819 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T14:32:38,824 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33149 2024-11-12T14:32:38,824 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33149 2024-11-12T14:32:38,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33149 2024-11-12T14:32:38,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33149 2024-11-12T14:32:38,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33149 2024-11-12T14:32:38,921 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2b6d221c5cde:0 server-side Connection retries=45 2024-11-12T14:32:38,922 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,923 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T14:32:38,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,923 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T14:32:38,926 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T14:32:38,928 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T14:32:38,929 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40627 2024-11-12T14:32:38,931 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40627 connecting to ZooKeeper ensemble=127.0.0.1:56395 2024-11-12T14:32:38,932 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:38,934 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:38,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406270x0, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T14:32:38,949 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40627-0x1012f7668900001 connected 2024-11-12T14:32:38,949 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:38,953 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T14:32:38,962 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T14:32:38,964 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T14:32:38,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T14:32:38,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40627 2024-11-12T14:32:38,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40627 2024-11-12T14:32:38,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40627 2024-11-12T14:32:38,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40627 2024-11-12T14:32:38,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40627 2024-11-12T14:32:38,988 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2b6d221c5cde:0 server-side Connection retries=45 2024-11-12T14:32:38,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,990 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T14:32:38,990 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:38,990 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T14:32:38,990 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T14:32:38,991 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T14:32:38,992 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39331 2024-11-12T14:32:38,995 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39331 connecting to ZooKeeper ensemble=127.0.0.1:56395 2024-11-12T14:32:38,996 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:39,000 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:39,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:393310x0, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T14:32:39,013 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:393310x0, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:39,013 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39331-0x1012f7668900002 connected 2024-11-12T14:32:39,013 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T14:32:39,014 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T14:32:39,015 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T14:32:39,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T14:32:39,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39331 2024-11-12T14:32:39,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39331 2024-11-12T14:32:39,020 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39331 2024-11-12T14:32:39,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39331 2024-11-12T14:32:39,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39331 2024-11-12T14:32:39,037 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2b6d221c5cde:0 server-side Connection retries=45 2024-11-12T14:32:39,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:39,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:39,038 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T14:32:39,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:39,038 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T14:32:39,038 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T14:32:39,038 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T14:32:39,039 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36673 2024-11-12T14:32:39,041 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36673 connecting to ZooKeeper ensemble=127.0.0.1:56395 2024-11-12T14:32:39,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:39,044 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:39,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:366730x0, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T14:32:39,055 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:366730x0, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:39,055 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36673-0x1012f7668900003 connected 2024-11-12T14:32:39,055 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T14:32:39,056 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T14:32:39,057 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T14:32:39,059 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T14:32:39,059 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36673 2024-11-12T14:32:39,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36673 2024-11-12T14:32:39,061 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36673 2024-11-12T14:32:39,061 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36673 2024-11-12T14:32:39,062 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36673 2024-11-12T14:32:39,079 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2b6d221c5cde:33149 2024-11-12T14:32:39,080 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:39,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:39,099 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:39,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T14:32:39,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T14:32:39,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, 
quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T14:32:39,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,129 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T14:32:39,131 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2b6d221c5cde,33149,1731421958123 from backup master directory 2024-11-12T14:32:39,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:39,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:39,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:39,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:39,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:39,139 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
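A side note on the ZooKeeper traffic filling the last few entries: ZKUtil keeps setting watches on znodes that do not yet exist (/hbase/master, /hbase/acl, /hbase/running), and every session then receives the NodeCreated / NodeDeleted / NodeChildrenChanged notifications as the master registers itself. That is the standard ZooKeeper one-shot watch pattern. The sketch below shows the same pattern with the stock ZooKeeper client, not HBase's ZKWatcher; the connect string, session timeout and class name are illustrative placeholders, while the /hbase/master path simply mirrors the one in the log.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchPendingZnode {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    CountDownLatch created = new CountDownLatch(1);
    // Placeholder connect string and timeout; a test cluster would point at its own quorum.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.None
          && event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();           // session established
      } else if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        created.countDown();             // the watched znode appeared
      }
    });
    connected.await();
    // exists() on a missing path still registers a one-shot watch
    // ("Set watcher on znode that does not yet exist" in the log above).
    zk.exists("/hbase/master", true);
    created.await();                     // fires when another session creates /hbase/master
    zk.close();
  }
}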
2024-11-12T14:32:39,139 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:39,141 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-12T14:32:39,142 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-12T14:32:39,202 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/hbase.id] with ID: 4bc05241-efb1-40ab-a901-4b40ae3907ed 2024-11-12T14:32:39,202 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/.tmp/hbase.id 2024-11-12T14:32:39,209 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:39,209 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:39,212 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:59644 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:45343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59644 dst: /127.0.0.1:45343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
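The DFSStripedOutputStream warnings above are a capacity mismatch rather than corruption: RS-3-2-1024k stripes each block group across numDataUnits + numParityUnits = 3 + 2 = 5 datanodes, and a three-datanode mini cluster has nowhere to place parity blocks 3 and 4, hence the follow-up "failed to write 2 blocks" warnings; the DataXceiver "Premature EOF" entries appear to be the datanode-side view of those aborted block writes. The log's own suggestion is the CLI check 'hdfs ec -verifyClusterSetup'; the sketch below is a rough programmatic equivalent using the public DistributedFileSystem API. The inspected path and fs.defaultFS are assumptions for illustration, not values taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class EcWidthVsDatanodes {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // fs.defaultFS assumed to point at the cluster under test
    Path dir = new Path("/");                   // directory to inspect; an assumption, not from the log
    FileSystem fs = dir.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      System.out.println("not an HDFS filesystem: " + fs.getUri());
      return;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);   // null means plain replication
    int live = dfs.getDataNodeStats(DatanodeReportType.LIVE).length;
    if (policy == null) {
      System.out.println(dir + " uses replication; live datanodes=" + live);
    } else {
      int width = policy.getNumDataUnits() + policy.getNumParityUnits();  // e.g. 3 + 2 = 5 for RS-3-2
      System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
          policy.getName(), width, live);
    }
    fs.close();
  }
}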
2024-11-12T14:32:39,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-12T14:32:39,219 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T14:32:39,219 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/.tmp/hbase.id]:[hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/hbase.id] 2024-11-12T14:32:39,263 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:39,268 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T14:32:39,285 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-11-12T14:32:39,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,308 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:39,308 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:39,311 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:54502 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:39633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54502 dst: /127.0.0.1:39633 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:39,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-12T14:32:39,317 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T14:32:39,332 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T14:32:39,334 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T14:32:39,339 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T14:32:39,366 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:39,366 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:39,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:59650 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:45343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59650 dst: /127.0.0.1:45343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:39,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-12T14:32:39,376 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
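For readers decoding the 'master:store' descriptor dumped above, its attributes map one-to-one onto the public ColumnFamilyDescriptorBuilder / TableDescriptorBuilder API. The sketch below only illustrates that mapping (the master builds this region internally, not through client code); the values are copied from the dump: 'info' with 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, and 'proc', 'rs' and 'state' with a single version, ROW bloom filter and 64 KB blocks. The class name is arbitrary.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor sketch = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        // 'info' as dumped: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs', 'state' as dumped: single version, ROW bloom, 64 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
        .build();
    System.out.println(sketch);
  }
}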
2024-11-12T14:32:39,392 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store 2024-11-12T14:32:39,406 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:39,406 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:39,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:34836 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34836 dst: /127.0.0.1:34979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:39,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-12T14:32:39,415 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T14:32:39,419 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-12T14:32:39,422 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:39,423 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T14:32:39,423 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:39,423 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:39,425 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T14:32:39,425 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:39,425 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
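The StoreHotnessProtector notice above doubles as a how-to: the protector stays disabled until hbase.region.store.parallel.put.limit is set to a value greater than zero. A minimal configuration sketch follows; the value 10 and the class name are arbitrary illustrations, not recommendations derived from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableStoreHotnessProtector {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();          // loads hbase-default.xml / hbase-site.xml
    conf.setInt("hbase.region.store.parallel.put.limit", 10);  // any value > 0 enables the protector
    System.out.println("parallel put limit = "
        + conf.getInt("hbase.region.store.parallel.put.limit", 0));
  }
}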
2024-11-12T14:32:39,426 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731421959423Disabling compacts and flushes for region at 1731421959423Disabling writes for close at 1731421959425 (+2 ms)Writing region close event to WAL at 1731421959425Closed at 1731421959425 2024-11-12T14:32:39,428 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/.initializing 2024-11-12T14:32:39,429 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/WALs/2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:39,436 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T14:32:39,450 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C33149%2C1731421958123, suffix=, logDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/WALs/2b6d221c5cde,33149,1731421958123, archiveDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/oldWALs, maxLogs=10 2024-11-12T14:32:39,476 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/WALs/2b6d221c5cde,33149,1731421958123/2b6d221c5cde%2C33149%2C1731421958123.1731421959454, exclude list is [], retry=0 2024-11-12T14:32:39,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:39,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34979,DS-c8cc485f-7cfc-4031-a115-8cf1c95fcfce,DISK] 2024-11-12T14:32:39,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45343,DS-aa932ba3-a8b2-4490-8191-b38d3142db20,DISK] 2024-11-12T14:32:39,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39633,DS-173cc6dd-8f7d-4e6a-9dff-ed5a70f45d17,DISK] 2024-11-12T14:32:39,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-12T14:32:39,538 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/WALs/2b6d221c5cde,33149,1731421958123/2b6d221c5cde%2C33149%2C1731421958123.1731421959454 2024-11-12T14:32:39,539 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45153:45153),(127.0.0.1/127.0.0.1:37193:37193),(127.0.0.1/127.0.0.1:34333:34333)] 2024-11-12T14:32:39,540 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T14:32:39,540 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:39,543 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,544 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T14:32:39,601 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:39,603 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:39,604 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T14:32:39,607 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:39,609 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T14:32:39,609 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T14:32:39,612 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:39,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T14:32:39,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T14:32:39,616 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:39,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T14:32:39,617 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,620 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,621 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,626 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,626 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,630 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T14:32:39,633 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:39,639 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T14:32:39,640 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72028671, jitterRate=0.07331083714962006}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T14:32:39,647 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731421959555Initializing all the Stores at 1731421959557 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421959558 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421959558Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421959558Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421959559 (+1 ms)Cleaning up temporary data from old regions at 1731421959627 (+68 ms)Region opened successfully at 1731421959647 (+20 ms) 2024-11-12T14:32:39,648 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T14:32:39,677 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62790918, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b6d221c5cde/172.17.0.3:0 2024-11-12T14:32:39,704 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T14:32:39,713 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T14:32:39,713 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T14:32:39,715 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T14:32:39,717 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-12T14:32:39,721 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-12T14:32:39,721 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T14:32:39,743 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-12T14:32:39,750 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T14:32:39,799 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T14:32:39,803 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T14:32:39,806 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T14:32:39,812 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T14:32:39,815 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T14:32:39,820 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T14:32:39,832 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T14:32:39,835 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T14:32:39,843 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T14:32:39,860 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T14:32:39,872 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T14:32:39,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:39,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:39,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:39,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:39,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,888 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2b6d221c5cde,33149,1731421958123, sessionid=0x1012f7668900000, setting cluster-up flag (Was=false) 2024-11-12T14:32:39,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-12T14:32:39,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,949 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T14:32:39,953 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:39,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:39,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:40,012 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T14:32:40,014 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:40,022 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T14:32:40,067 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(746): ClusterId : 4bc05241-efb1-40ab-a901-4b40ae3907ed 2024-11-12T14:32:40,067 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(746): ClusterId : 4bc05241-efb1-40ab-a901-4b40ae3907ed 2024-11-12T14:32:40,067 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(746): ClusterId : 4bc05241-efb1-40ab-a901-4b40ae3907ed 2024-11-12T14:32:40,069 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T14:32:40,069 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T14:32:40,069 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T14:32:40,085 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T14:32:40,085 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-11-12T14:32:40,085 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T14:32:40,085 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T14:32:40,085 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T14:32:40,085 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T14:32:40,090 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T14:32:40,097 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T14:32:40,097 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T14:32:40,097 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T14:32:40,098 DEBUG [RS:0;2b6d221c5cde:40627 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f5a12d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b6d221c5cde/172.17.0.3:0 2024-11-12T14:32:40,098 DEBUG [RS:1;2b6d221c5cde:39331 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a7530bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b6d221c5cde/172.17.0.3:0 2024-11-12T14:32:40,098 DEBUG [RS:2;2b6d221c5cde:36673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25d2619d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b6d221c5cde/172.17.0.3:0 2024-11-12T14:32:40,100 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T14:32:40,108 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-12T14:32:40,112 DEBUG [RS:2;2b6d221c5cde:36673 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2b6d221c5cde:36673 2024-11-12T14:32:40,114 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T14:32:40,115 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T14:32:40,115 DEBUG [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T14:32:40,116 DEBUG [RS:0;2b6d221c5cde:40627 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2b6d221c5cde:40627 2024-11-12T14:32:40,116 DEBUG [RS:1;2b6d221c5cde:39331 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2b6d221c5cde:39331 2024-11-12T14:32:40,117 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T14:32:40,117 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(2659): reportForDuty to master=2b6d221c5cde,33149,1731421958123 with port=36673, startcode=1731421959037 2024-11-12T14:32:40,117 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T14:32:40,117 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T14:32:40,117 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T14:32:40,117 DEBUG [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T14:32:40,117 DEBUG [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T14:32:40,118 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(2659): reportForDuty to master=2b6d221c5cde,33149,1731421958123 with port=40627, startcode=1731421958890 2024-11-12T14:32:40,118 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(2659): reportForDuty to master=2b6d221c5cde,33149,1731421958123 with port=39331, startcode=1731421958988 2024-11-12T14:32:40,115 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2b6d221c5cde,33149,1731421958123 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T14:32:40,123 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2b6d221c5cde:0, corePoolSize=5, maxPoolSize=5 2024-11-12T14:32:40,123 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2b6d221c5cde:0, corePoolSize=5, maxPoolSize=5 2024-11-12T14:32:40,123 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=5, maxPoolSize=5 2024-11-12T14:32:40,124 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=5, maxPoolSize=5 2024-11-12T14:32:40,124 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2b6d221c5cde:0, corePoolSize=10, maxPoolSize=10 2024-11-12T14:32:40,124 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,124 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=2, maxPoolSize=2 2024-11-12T14:32:40,124 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,128 DEBUG [RS:0;2b6d221c5cde:40627 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T14:32:40,128 DEBUG [RS:1;2b6d221c5cde:39331 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T14:32:40,128 DEBUG [RS:2;2b6d221c5cde:36673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T14:32:40,129 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731421990129 2024-11-12T14:32:40,131 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T14:32:40,132 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T14:32:40,132 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T14:32:40,132 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T14:32:40,136 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T14:32:40,136 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T14:32:40,137 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T14:32:40,137 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T14:32:40,137 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,139 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:40,140 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T14:32:40,144 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T14:32:40,146 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T14:32:40,147 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] 
cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T14:32:40,152 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T14:32:40,153 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T14:32:40,160 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.large.0-1731421960154,5,FailOnTimeoutGroup] 2024-11-12T14:32:40,161 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:40,161 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.small.0-1731421960160,5,FailOnTimeoutGroup] 2024-11-12T14:32:40,161 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:40,161 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,161 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T14:32:40,163 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,163 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:54532 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:39633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54532 dst: /127.0.0.1:39633 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:40,171 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57857, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T14:32:40,171 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53387, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T14:32:40,171 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40535, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T14:32:40,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-12T14:32:40,178 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33149 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:40,179 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:40,180 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T14:32:40,181 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33149 {}] master.ServerManager(517): Registering regionserver=2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:40,181 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0 2024-11-12T14:32:40,190 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:40,191 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T14:32:40,193 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33149 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:40,193 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33149 {}] master.ServerManager(517): Registering regionserver=2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:40,197 DEBUG [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0 2024-11-12T14:32:40,197 DEBUG [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39957 2024-11-12T14:32:40,197 DEBUG [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T14:32:40,198 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33149 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2b6d221c5cde,40627,1731421958890 2024-11-12T14:32:40,199 DEBUG [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0 2024-11-12T14:32:40,199 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33149 {}] master.ServerManager(517): Registering regionserver=2b6d221c5cde,40627,1731421958890 2024-11-12T14:32:40,199 DEBUG [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39957 2024-11-12T14:32:40,199 DEBUG [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T14:32:40,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:54546 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:39633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54546 dst: /127.0.0.1:39633 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-12T14:32:40,203 DEBUG [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0 2024-11-12T14:32:40,203 DEBUG [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39957 2024-11-12T14:32:40,203 DEBUG [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T14:32:40,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-12T14:32:40,206 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T14:32:40,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:40,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T14:32:40,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T14:32:40,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T14:32:40,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:40,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:40,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T14:32:40,217 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T14:32:40,217 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:40,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:40,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T14:32:40,222 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T14:32:40,222 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:40,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:40,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T14:32:40,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T14:32:40,227 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:40,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:40,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T14:32:40,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740 2024-11-12T14:32:40,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740 2024-11-12T14:32:40,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T14:32:40,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T14:32:40,236 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T14:32:40,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T14:32:40,250 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T14:32:40,251 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70088423, jitterRate=0.04439888894557953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T14:32:40,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731421960208Initializing all the Stores at 1731421960210 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421960210Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421960210Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421960210Instantiating store for column family 
{NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421960210Cleaning up temporary data from old regions at 1731421960235 (+25 ms)Region opened successfully at 1731421960255 (+20 ms) 2024-11-12T14:32:40,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T14:32:40,255 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T14:32:40,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T14:32:40,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T14:32:40,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T14:32:40,257 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T14:32:40,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731421960255Disabling compacts and flushes for region at 1731421960255Disabling writes for close at 1731421960255Writing region close event to WAL at 1731421960257 (+2 ms)Closed at 1731421960257 2024-11-12T14:32:40,260 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T14:32:40,260 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T14:32:40,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T14:32:40,275 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T14:32:40,279 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T14:32:40,282 DEBUG [RS:1;2b6d221c5cde:39331 {}] zookeeper.ZKUtil(111): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:40,282 WARN [RS:1;2b6d221c5cde:39331 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T14:32:40,282 DEBUG [RS:2;2b6d221c5cde:36673 {}] zookeeper.ZKUtil(111): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:40,282 DEBUG [RS:0;2b6d221c5cde:40627 {}] zookeeper.ZKUtil(111): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2b6d221c5cde,40627,1731421958890 2024-11-12T14:32:40,282 INFO [RS:1;2b6d221c5cde:39331 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T14:32:40,282 WARN [RS:2;2b6d221c5cde:36673 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T14:32:40,282 WARN [RS:0;2b6d221c5cde:40627 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T14:32:40,282 INFO [RS:2;2b6d221c5cde:36673 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T14:32:40,282 INFO [RS:0;2b6d221c5cde:40627 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T14:32:40,282 DEBUG [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:40,282 DEBUG [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:40,283 DEBUG [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,40627,1731421958890 2024-11-12T14:32:40,284 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2b6d221c5cde,39331,1731421958988] 2024-11-12T14:32:40,284 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2b6d221c5cde,40627,1731421958890] 2024-11-12T14:32:40,284 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2b6d221c5cde,36673,1731421959037] 2024-11-12T14:32:40,309 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T14:32:40,309 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T14:32:40,309 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T14:32:40,324 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T14:32:40,324 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T14:32:40,325 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T14:32:40,330 INFO 
[RS:1;2b6d221c5cde:39331 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T14:32:40,330 INFO [RS:2;2b6d221c5cde:36673 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T14:32:40,330 INFO [RS:0;2b6d221c5cde:40627 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T14:32:40,330 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,330 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,330 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,331 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T14:32:40,331 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T14:32:40,331 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T14:32:40,337 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T14:32:40,337 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T14:32:40,337 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T14:32:40,339 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,339 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,339 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:40,339 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,339 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2b6d221c5cde:0, corePoolSize=2, maxPoolSize=2 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2b6d221c5cde:0, corePoolSize=2, maxPoolSize=2 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2b6d221c5cde:0, corePoolSize=2, maxPoolSize=2 2024-11-12T14:32:40,340 DEBUG 
[RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:40,340 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:40,340 DEBUG [RS:0;2b6d221c5cde:40627 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:40,340 DEBUG [RS:2;2b6d221c5cde:36673 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:40,340 DEBUG [RS:1;2b6d221c5cde:39331 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:40,341 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,341 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:40,342 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,40627,1731421958890-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,39331,1731421958988-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,342 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,36673,1731421959037-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T14:32:40,359 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T14:32:40,359 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T14:32:40,359 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T14:32:40,361 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,39331,1731421958988-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,361 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,40627,1731421958890-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,361 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,36673,1731421959037-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,361 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,362 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,362 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,362 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.Replication(171): 2b6d221c5cde,39331,1731421958988 started 2024-11-12T14:32:40,362 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.Replication(171): 2b6d221c5cde,36673,1731421959037 started 2024-11-12T14:32:40,362 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.Replication(171): 2b6d221c5cde,40627,1731421958890 started 2024-11-12T14:32:40,381 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:40,381 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1482): Serving as 2b6d221c5cde,39331,1731421958988, RpcServer on 2b6d221c5cde/172.17.0.3:39331, sessionid=0x1012f7668900002 2024-11-12T14:32:40,382 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T14:32:40,382 DEBUG [RS:1;2b6d221c5cde:39331 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:40,382 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,39331,1731421958988' 2024-11-12T14:32:40,382 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T14:32:40,383 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T14:32:40,384 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T14:32:40,384 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T14:32:40,384 DEBUG [RS:1;2b6d221c5cde:39331 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:40,384 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,39331,1731421958988' 2024-11-12T14:32:40,384 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T14:32:40,385 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T14:32:40,385 DEBUG [RS:1;2b6d221c5cde:39331 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T14:32:40,385 INFO [RS:1;2b6d221c5cde:39331 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T14:32:40,385 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,385 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:40,385 INFO [RS:1;2b6d221c5cde:39331 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-12T14:32:40,385 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1482): Serving as 2b6d221c5cde,36673,1731421959037, RpcServer on 2b6d221c5cde/172.17.0.3:36673, sessionid=0x1012f7668900003 2024-11-12T14:32:40,385 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(1482): Serving as 2b6d221c5cde,40627,1731421958890, RpcServer on 2b6d221c5cde/172.17.0.3:40627, sessionid=0x1012f7668900001 2024-11-12T14:32:40,386 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T14:32:40,386 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T14:32:40,386 DEBUG [RS:0;2b6d221c5cde:40627 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2b6d221c5cde,40627,1731421958890 2024-11-12T14:32:40,386 DEBUG [RS:2;2b6d221c5cde:36673 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:40,386 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,36673,1731421959037' 2024-11-12T14:32:40,386 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,40627,1731421958890' 2024-11-12T14:32:40,386 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T14:32:40,386 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T14:32:40,387 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T14:32:40,387 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T14:32:40,387 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T14:32:40,387 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T14:32:40,387 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T14:32:40,387 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T14:32:40,387 DEBUG [RS:0;2b6d221c5cde:40627 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2b6d221c5cde,40627,1731421958890 2024-11-12T14:32:40,387 DEBUG [RS:2;2b6d221c5cde:36673 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:40,387 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,40627,1731421958890' 2024-11-12T14:32:40,387 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,36673,1731421959037' 2024-11-12T14:32:40,388 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T14:32:40,388 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T14:32:40,388 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T14:32:40,388 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T14:32:40,389 DEBUG [RS:2;2b6d221c5cde:36673 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T14:32:40,389 INFO [RS:2;2b6d221c5cde:36673 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T14:32:40,389 INFO [RS:2;2b6d221c5cde:36673 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T14:32:40,389 DEBUG [RS:0;2b6d221c5cde:40627 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T14:32:40,389 INFO [RS:0;2b6d221c5cde:40627 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T14:32:40,389 INFO [RS:0;2b6d221c5cde:40627 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T14:32:40,430 WARN [2b6d221c5cde:33149 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T14:32:40,492 INFO [RS:1;2b6d221c5cde:39331 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T14:32:40,492 INFO [RS:2;2b6d221c5cde:36673 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T14:32:40,492 INFO [RS:0;2b6d221c5cde:40627 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-12T14:32:40,497 INFO [RS:0;2b6d221c5cde:40627 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C40627%2C1731421958890, suffix=, logDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,40627,1731421958890, archiveDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/oldWALs, maxLogs=32 2024-11-12T14:32:40,497 INFO [RS:2;2b6d221c5cde:36673 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C36673%2C1731421959037, suffix=, logDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,36673,1731421959037, archiveDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/oldWALs, maxLogs=32 2024-11-12T14:32:40,497 INFO [RS:1;2b6d221c5cde:39331 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C39331%2C1731421958988, suffix=, logDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,39331,1731421958988, archiveDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/oldWALs, maxLogs=32 2024-11-12T14:32:40,514 DEBUG [RS:0;2b6d221c5cde:40627 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,40627,1731421958890/2b6d221c5cde%2C40627%2C1731421958890.1731421960502, exclude list is [], retry=0 2024-11-12T14:32:40,514 DEBUG [RS:2;2b6d221c5cde:36673 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,36673,1731421959037/2b6d221c5cde%2C36673%2C1731421959037.1731421960502, exclude list is [], retry=0 2024-11-12T14:32:40,515 DEBUG [RS:1;2b6d221c5cde:39331 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,39331,1731421958988/2b6d221c5cde%2C39331%2C1731421958988.1731421960502, exclude list is [], retry=0 2024-11-12T14:32:40,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34979,DS-c8cc485f-7cfc-4031-a115-8cf1c95fcfce,DISK] 2024-11-12T14:32:40,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34979,DS-c8cc485f-7cfc-4031-a115-8cf1c95fcfce,DISK] 2024-11-12T14:32:40,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39633,DS-173cc6dd-8f7d-4e6a-9dff-ed5a70f45d17,DISK] 2024-11-12T14:32:40,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45343,DS-aa932ba3-a8b2-4490-8191-b38d3142db20,DISK] 2024-11-12T14:32:40,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45343,DS-aa932ba3-a8b2-4490-8191-b38d3142db20,DISK] 2024-11-12T14:32:40,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34979,DS-c8cc485f-7cfc-4031-a115-8cf1c95fcfce,DISK] 2024-11-12T14:32:40,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39633,DS-173cc6dd-8f7d-4e6a-9dff-ed5a70f45d17,DISK] 2024-11-12T14:32:40,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39633,DS-173cc6dd-8f7d-4e6a-9dff-ed5a70f45d17,DISK] 2024-11-12T14:32:40,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45343,DS-aa932ba3-a8b2-4490-8191-b38d3142db20,DISK] 2024-11-12T14:32:40,563 INFO [RS:1;2b6d221c5cde:39331 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,39331,1731421958988/2b6d221c5cde%2C39331%2C1731421958988.1731421960502 2024-11-12T14:32:40,564 INFO [RS:0;2b6d221c5cde:40627 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,40627,1731421958890/2b6d221c5cde%2C40627%2C1731421958890.1731421960502 2024-11-12T14:32:40,564 INFO [RS:2;2b6d221c5cde:36673 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,36673,1731421959037/2b6d221c5cde%2C36673%2C1731421959037.1731421960502 2024-11-12T14:32:40,565 DEBUG [RS:1;2b6d221c5cde:39331 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37193:37193),(127.0.0.1/127.0.0.1:45153:45153),(127.0.0.1/127.0.0.1:34333:34333)] 2024-11-12T14:32:40,567 DEBUG [RS:0;2b6d221c5cde:40627 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45153:45153),(127.0.0.1/127.0.0.1:37193:37193),(127.0.0.1/127.0.0.1:34333:34333)] 2024-11-12T14:32:40,568 DEBUG [RS:2;2b6d221c5cde:36673 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37193:37193),(127.0.0.1/127.0.0.1:34333:34333),(127.0.0.1/127.0.0.1:45153:45153)] 2024-11-12T14:32:40,685 DEBUG [2b6d221c5cde:33149 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-12T14:32:40,696 DEBUG [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(204): Hosts are {2b6d221c5cde=0} racks are {/default-rack=0} 2024-11-12T14:32:40,701 DEBUG [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T14:32:40,702 DEBUG [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T14:32:40,702 DEBUG [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T14:32:40,702 DEBUG [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T14:32:40,702 DEBUG [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T14:32:40,702 DEBUG [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T14:32:40,702 INFO [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T14:32:40,702 INFO [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T14:32:40,702 INFO [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T14:32:40,702 DEBUG [2b6d221c5cde:33149 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T14:32:40,709 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:40,715 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2b6d221c5cde,36673,1731421959037, state=OPENING 2024-11-12T14:32:40,767 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T14:32:40,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-12T14:32:40,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:40,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:40,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:40,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:40,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:40,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:40,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:40,782 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T14:32:40,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2b6d221c5cde,36673,1731421959037}] 2024-11-12T14:32:40,965 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T14:32:40,966 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41321, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T14:32:40,978 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T14:32:40,979 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-12T14:32:40,979 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-12T14:32:40,983 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C36673%2C1731421959037.meta, suffix=.meta, logDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,36673,1731421959037, archiveDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/oldWALs, maxLogs=32 2024-11-12T14:32:41,001 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When 
create output stream for /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,36673,1731421959037/2b6d221c5cde%2C36673%2C1731421959037.meta.1731421960985.meta, exclude list is [], retry=0 2024-11-12T14:32:41,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39633,DS-173cc6dd-8f7d-4e6a-9dff-ed5a70f45d17,DISK] 2024-11-12T14:32:41,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34979,DS-c8cc485f-7cfc-4031-a115-8cf1c95fcfce,DISK] 2024-11-12T14:32:41,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45343,DS-aa932ba3-a8b2-4490-8191-b38d3142db20,DISK] 2024-11-12T14:32:41,009 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/WALs/2b6d221c5cde,36673,1731421959037/2b6d221c5cde%2C36673%2C1731421959037.meta.1731421960985.meta 2024-11-12T14:32:41,009 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45153:45153),(127.0.0.1/127.0.0.1:37193:37193),(127.0.0.1/127.0.0.1:34333:34333)] 2024-11-12T14:32:41,010 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T14:32:41,011 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T14:32:41,014 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T14:32:41,018 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T14:32:41,021 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T14:32:41,022 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:41,022 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T14:32:41,022 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T14:32:41,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T14:32:41,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T14:32:41,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:41,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:41,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T14:32:41,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T14:32:41,030 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:41,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:41,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T14:32:41,032 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T14:32:41,032 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:41,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:41,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T14:32:41,034 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T14:32:41,034 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:41,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-12T14:32:41,035 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T14:32:41,037 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740 2024-11-12T14:32:41,039 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740 2024-11-12T14:32:41,041 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T14:32:41,041 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T14:32:41,042 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T14:32:41,044 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T14:32:41,046 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61526281, jitterRate=-0.08318696916103363}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T14:32:41,046 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T14:32:41,047 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731421961023Writing region info on filesystem at 1731421961023Initializing all the Stores at 1731421961025 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421961025Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421961025Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421961026 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421961026Cleaning up temporary data from old regions at 1731421961041 (+15 ms)Running coprocessor post-open hooks at 1731421961046 (+5 ms)Region opened successfully at 1731421961047 (+1 ms) 2024-11-12T14:32:41,054 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731421960956 2024-11-12T14:32:41,064 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T14:32:41,064 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T14:32:41,066 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:41,068 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2b6d221c5cde,36673,1731421959037, state=OPEN 2024-11-12T14:32:41,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T14:32:41,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T14:32:41,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T14:32:41,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T14:32:41,125 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:41,125 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:41,125 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:41,125 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:41,126 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:41,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T14:32:41,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2b6d221c5cde,36673,1731421959037 in 342 msec 2024-11-12T14:32:41,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T14:32:41,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 867 msec 2024-11-12T14:32:41,140 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T14:32:41,140 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T14:32:41,158 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T14:32:41,159 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2b6d221c5cde,36673,1731421959037, seqNum=-1] 2024-11-12T14:32:41,178 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T14:32:41,180 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56185, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T14:32:41,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1480 sec 2024-11-12T14:32:41,202 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731421961202, completionTime=-1 2024-11-12T14:32:41,204 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-12T14:32:41,204 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-12T14:32:41,250 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-12T14:32:41,250 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731422021250 2024-11-12T14:32:41,251 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731422081250 2024-11-12T14:32:41,251 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 46 msec 2024-11-12T14:32:41,252 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-12T14:32:41,259 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,33149,1731421958123-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:41,259 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,33149,1731421958123-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:41,259 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,33149,1731421958123-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:41,264 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2b6d221c5cde:33149, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:41,264 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:41,265 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:41,271 DEBUG [master/2b6d221c5cde:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T14:32:41,294 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.155sec 2024-11-12T14:32:41,296 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T14:32:41,297 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T14:32:41,298 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T14:32:41,298 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-12T14:32:41,298 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T14:32:41,299 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,33149,1731421958123-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T14:32:41,299 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,33149,1731421958123-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T14:32:41,303 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T14:32:41,304 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T14:32:41,304 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,33149,1731421958123-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:41,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d855a99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T14:32:41,386 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-12T14:32:41,386 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-12T14:32:41,389 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2b6d221c5cde,33149,-1 for getting cluster id 2024-11-12T14:32:41,391 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T14:32:41,399 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4bc05241-efb1-40ab-a901-4b40ae3907ed' 2024-11-12T14:32:41,401 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T14:32:41,401 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4bc05241-efb1-40ab-a901-4b40ae3907ed" 2024-11-12T14:32:41,401 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@475b6866, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T14:32:41,402 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2b6d221c5cde,33149,-1] 2024-11-12T14:32:41,404 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T14:32:41,406 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:41,407 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41570, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-12T14:32:41,409 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6842ac24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T14:32:41,410 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T14:32:41,416 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2b6d221c5cde,36673,1731421959037, seqNum=-1] 2024-11-12T14:32:41,417 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T14:32:41,419 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:32832, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T14:32:41,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:41,441 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T14:32:41,445 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:41,447 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@22bbd991 2024-11-12T14:32:41,448 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T14:32:41,451 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41572, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T14:32:41,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T14:32:41,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-12T14:32:41,466 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T14:32:41,468 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-12T14:32:41,468 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:41,471 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T14:32:41,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:41,479 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:41,479 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:41,482 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:59704 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:45343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59704 dst: /127.0.0.1:45343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:41,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-12T14:32:41,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:41,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:41,891 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:41,895 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c7116e3c193693da4c76f7ac835cf1b6, NAME => 'TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0 2024-11-12T14:32:41,903 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:41,904 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:41,906 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:36048 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:45343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36048 dst: /127.0.0.1:45343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:41,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-12T14:32:41,910 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:41,911 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:41,911 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing c7116e3c193693da4c76f7ac835cf1b6, disabling compactions & flushes 2024-11-12T14:32:41,911 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:41,911 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:41,911 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. after waiting 0 ms 2024-11-12T14:32:41,911 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:41,911 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:41,911 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for c7116e3c193693da4c76f7ac835cf1b6: Waiting for close lock at 1731421961911Disabling compacts and flushes for region at 1731421961911Disabling writes for close at 1731421961911Writing region close event to WAL at 1731421961911Closed at 1731421961911 2024-11-12T14:32:41,913 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T14:32:41,918 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731421961914"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731421961914"}]},"ts":"1731421961914"} 2024-11-12T14:32:41,922 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-12T14:32:41,924 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T14:32:41,926 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731421961924"}]},"ts":"1731421961924"} 2024-11-12T14:32:41,931 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-12T14:32:41,931 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2b6d221c5cde=0} racks are {/default-rack=0} 2024-11-12T14:32:41,932 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T14:32:41,932 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T14:32:41,932 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T14:32:41,932 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T14:32:41,932 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T14:32:41,932 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T14:32:41,932 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T14:32:41,932 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T14:32:41,932 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T14:32:41,932 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T14:32:41,934 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c7116e3c193693da4c76f7ac835cf1b6, ASSIGN}] 2024-11-12T14:32:41,936 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c7116e3c193693da4c76f7ac835cf1b6, ASSIGN 2024-11-12T14:32:41,938 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c7116e3c193693da4c76f7ac835cf1b6, ASSIGN; state=OFFLINE, location=2b6d221c5cde,39331,1731421958988; forceNewPlan=false, retain=false 2024-11-12T14:32:42,090 INFO [2b6d221c5cde:33149 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
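Once the ASSIGN subprocedure (pid=5) completes, the region is expected to be OPEN on exactly one region server. A small client-side way to confirm where a table's regions landed, sketched with assumed connection setup:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class CheckAssignment {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
          // Each location pairs a region (encoded name, key range) with its hosting server,
          // the same information the AsyncNonMetaRegionLocator entry later in this log reports.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }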
2024-11-12T14:32:42,091 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c7116e3c193693da4c76f7ac835cf1b6, regionState=OPENING, regionLocation=2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:42,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c7116e3c193693da4c76f7ac835cf1b6, ASSIGN because future has completed 2024-11-12T14:32:42,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c7116e3c193693da4c76f7ac835cf1b6, server=2b6d221c5cde,39331,1731421958988}] 2024-11-12T14:32:42,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:42,253 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T14:32:42,258 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35977, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T14:32:42,266 INFO [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:42,266 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c7116e3c193693da4c76f7ac835cf1b6, NAME => 'TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6.', STARTKEY => '', ENDKEY => ''} 2024-11-12T14:32:42,266 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,267 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:42,267 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,267 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,269 INFO [StoreOpener-c7116e3c193693da4c76f7ac835cf1b6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,271 INFO [StoreOpener-c7116e3c193693da4c76f7ac835cf1b6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c7116e3c193693da4c76f7ac835cf1b6 columnFamilyName cf 2024-11-12T14:32:42,272 DEBUG [StoreOpener-c7116e3c193693da4c76f7ac835cf1b6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:42,272 INFO [StoreOpener-c7116e3c193693da4c76f7ac835cf1b6-1 {}] regionserver.HStore(327): Store=c7116e3c193693da4c76f7ac835cf1b6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T14:32:42,273 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,274 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,275 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,276 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,276 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,278 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,283 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T14:32:42,283 INFO [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c7116e3c193693da4c76f7ac835cf1b6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64680053, jitterRate=-0.03619210422039032}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T14:32:42,283 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:42,284 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c7116e3c193693da4c76f7ac835cf1b6: Running coprocessor pre-open hook at 1731421962267Writing region info on filesystem at 1731421962267Initializing all the Stores at 1731421962269 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421962269Cleaning up temporary data from old regions at 1731421962276 (+7 ms)Running coprocessor post-open hooks at 1731421962284 (+8 ms)Region opened successfully at 1731421962284 2024-11-12T14:32:42,286 INFO [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6., pid=6, masterSystemTime=1731421962252 2024-11-12T14:32:42,289 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:42,289 INFO [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:42,290 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c7116e3c193693da4c76f7ac835cf1b6, regionState=OPEN, openSeqNum=2, regionLocation=2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:42,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c7116e3c193693da4c76f7ac835cf1b6, server=2b6d221c5cde,39331,1731421958988 because future has completed 2024-11-12T14:32:42,299 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T14:32:42,299 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c7116e3c193693da4c76f7ac835cf1b6, server=2b6d221c5cde,39331,1731421958988 in 199 msec 2024-11-12T14:32:42,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T14:32:42,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=c7116e3c193693da4c76f7ac835cf1b6, ASSIGN in 365 msec 2024-11-12T14:32:42,304 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T14:32:42,304 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731421962304"}]},"ts":"1731421962304"} 2024-11-12T14:32:42,307 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-12T14:32:42,308 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T14:32:42,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 849 msec 2024-11-12T14:32:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:42,615 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T14:32:42,615 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-12T14:32:42,616 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T14:32:42,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-12T14:32:42,624 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T14:32:42,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-12T14:32:42,635 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6., hostname=2b6d221c5cde,39331,1731421958988, seqNum=2] 2024-11-12T14:32:42,636 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T14:32:42,638 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50144, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T14:32:42,648 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-12T14:32:42,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-12T14:32:42,657 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-12T14:32:42,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:42,659 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T14:32:42,661 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T14:32:42,764 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:42,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39331 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-12T14:32:42,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:42,827 INFO [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c7116e3c193693da4c76f7ac835cf1b6 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-12T14:32:42,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/.tmp/cf/c4a325d4ebe44dccb4e897412a697978 is 36, key is row/cf:cq/1731421962639/Put/seqid=0 2024-11-12T14:32:42,892 WARN [IPC Server handler 3 on default port 39957 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 2024-11-12T14:32:42,893 WARN [IPC Server handler 3 on default port 39957 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T14:32:42,893 WARN [IPC Server handler 3 on default port 39957 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T14:32:42,893 WARN [IPC Server handler 3 on default port 39957 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T14:32:42,897 WARN [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore(850): Failed flushing store file for c7116e3c193693da4c76f7ac835cf1b6/cf, retrying num=0 org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/.tmp/cf/c4a325d4ebe44dccb4e897412a697978 could only be written to 2 of 
the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor7.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.addBlock(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor7.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor7.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:137) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:112) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at java.io.DataOutputStream.write(DataOutputStream.java:112) ~[?:?] at java.io.ByteArrayOutputStream.writeTo(ByteArrayOutputStream.java:161) ~[?:?] at org.apache.hadoop.hbase.io.hfile.FixedFileTrailer.serialize(FixedFileTrailer.java:197) ~[classes/:?] at org.apache.hadoop.hbase.io.hfile.HFileWriterImpl.finishClose(HFileWriterImpl.java:855) ~[classes/:?] at org.apache.hadoop.hbase.io.hfile.HFileWriterImpl.close(HFileWriterImpl.java:680) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.StoreFileWriter$SingleStoreFileWriter.close(StoreFileWriter.java:787) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.StoreFileWriter.close(StoreFileWriter.java:294) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.StoreFlusher.finalizeWriter(StoreFlusher.java:70) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.flushSnapshot(DefaultStoreFlusher.java:74) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:56) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:42,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-12T14:32:42,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-12T14:32:42,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-12T14:32:42,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-12T14:32:42,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-12T14:32:42,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-12T14:32:42,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-12T14:32:42,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-12T14:32:42,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-12T14:32:42,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-12T14:32:42,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:43,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:43,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:43,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/.tmp/cf/8aebe203598a4598820e13a767e04347 is 36, key is 
row/cf:cq/1731421962639/Put/seqid=0 2024-11-12T14:32:43,918 WARN [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:43,918 WARN [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:43,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_854488787_22 at /127.0.0.1:40330 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40330 dst: /127.0.0.1:39633 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:43,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-12T14:32:43,927 WARN [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:43,928 INFO [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/.tmp/cf/8aebe203598a4598820e13a767e04347 2024-11-12T14:32:43,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/.tmp/cf/8aebe203598a4598820e13a767e04347 as hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/cf/8aebe203598a4598820e13a767e04347 2024-11-12T14:32:43,974 INFO [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/cf/8aebe203598a4598820e13a767e04347, entries=1, sequenceid=5, filesize=4.7 K 2024-11-12T14:32:43,980 INFO [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for c7116e3c193693da4c76f7ac835cf1b6 in 1154ms, sequenceid=5, compaction requested=false 2024-11-12T14:32:43,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-12T14:32:43,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c7116e3c193693da4c76f7ac835cf1b6: 2024-11-12T14:32:43,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 
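The 32-byte memstore flushed above holds the single cell the test wrote; the HFile writer reports its key as row/cf:cq. The client-side equivalent of that write plus the table flush carried out by the FlushTableProcedure (pid=7) looks roughly like this; the connection setup and the cell value are placeholders:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // One cell at row/cf:cq, mirroring the key reported by HFileWriterImpl above.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Admin.flush drives the master-side flush procedure for the whole table.
          admin.flush(name);
        }
      }
    }

Note that the resulting 4.7 K HFile was committed even though only the data blocks of its EC block group could be placed; the earlier parity failures reduce redundancy (the "high risk of losing data" warning) rather than failing the flush itself.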
2024-11-12T14:32:43,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T14:32:43,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T14:32:43,993 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T14:32:43,993 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3280 sec 2024-11-12T14:32:43,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 1.3440 sec 2024-11-12T14:32:44,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33149 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:44,815 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T14:32:44,834 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T14:32:44,834 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T14:32:44,835 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) 
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:44,838 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:44,839 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:44,839 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T14:32:44,839 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T14:32:44,839 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1075752841, stopped=false 2024-11-12T14:32:44,839 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2b6d221c5cde,33149,1731421958123 2024-11-12T14:32:44,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:44,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:44,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:44,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:44,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:44,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, 
quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:44,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:44,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:44,893 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T14:32:44,894 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T14:32:44,894 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:44,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:44,894 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:44,894 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:44,895 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2b6d221c5cde,40627,1731421958890' ***** 2024-11-12T14:32:44,895 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T14:32:44,895 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:44,895 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:44,895 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2b6d221c5cde,39331,1731421958988' ***** 2024-11-12T14:32:44,895 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T14:32:44,895 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T14:32:44,895 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2b6d221c5cde,36673,1731421959037' ***** 2024-11-12T14:32:44,895 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T14:32:44,895 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T14:32:44,896 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T14:32:44,896 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T14:32:44,896 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T14:32:44,896 INFO [RS:0;2b6d221c5cde:40627 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure 
manager gracefully. 2024-11-12T14:32:44,896 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T14:32:44,896 INFO [RS:1;2b6d221c5cde:39331 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T14:32:44,896 INFO [RS:0;2b6d221c5cde:40627 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T14:32:44,896 INFO [RS:1;2b6d221c5cde:39331 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T14:32:44,896 INFO [RS:2;2b6d221c5cde:36673 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T14:32:44,896 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(959): stopping server 2b6d221c5cde,40627,1731421958890 2024-11-12T14:32:44,896 INFO [RS:2;2b6d221c5cde:36673 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T14:32:44,896 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T14:32:44,896 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(959): stopping server 2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:44,896 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T14:32:44,896 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(3091): Received CLOSE for c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:44,896 INFO [RS:0;2b6d221c5cde:40627 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2b6d221c5cde:40627. 2024-11-12T14:32:44,896 INFO [RS:2;2b6d221c5cde:36673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2b6d221c5cde:36673. 
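The NodeDeleted events for /hbase/running a few entries earlier are how the master signals cluster shutdown: each region server keeps a watch on that znode and begins the stop sequence seen here once it disappears. A generic sketch of the same watch pattern using the plain ZooKeeper client (the quorum address is a placeholder; HBase itself goes through its internal ZKWatcher):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningNodeWatch {
      public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            deleted.countDown();  // cluster shutdown requested
          }
        };
        // Placeholder quorum; the quorum in this run was 127.0.0.1:56395.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher);
        // Sets a one-shot watch whether or not the znode currently exists;
        // a long-lived watcher would re-register after every event.
        zk.exists("/hbase/running", true);
        deleted.await();
        zk.close();
      }
    }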
2024-11-12T14:32:44,896 DEBUG [RS:0;2b6d221c5cde:40627 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:44,896 DEBUG [RS:2;2b6d221c5cde:36673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:44,896 DEBUG [RS:0;2b6d221c5cde:40627 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:44,896 DEBUG [RS:2;2b6d221c5cde:36673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:44,896 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(976): stopping server 2b6d221c5cde,40627,1731421958890; all regions closed. 2024-11-12T14:32:44,896 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T14:32:44,897 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-12T14:32:44,897 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T14:32:44,897 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T14:32:44,897 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(959): stopping server 2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:44,897 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T14:32:44,897 INFO [RS:1;2b6d221c5cde:39331 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2b6d221c5cde:39331. 2024-11-12T14:32:44,897 DEBUG [RS:1;2b6d221c5cde:39331 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:44,897 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T14:32:44,897 DEBUG [RS:1;2b6d221c5cde:39331 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:44,897 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T14:32:44,897 DEBUG [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T14:32:44,897 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c7116e3c193693da4c76f7ac835cf1b6, disabling compactions & flushes 2024-11-12T14:32:44,897 DEBUG [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1325): Online Regions={c7116e3c193693da4c76f7ac835cf1b6=TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6.} 2024-11-12T14:32:44,897 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T14:32:44,897 INFO [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 
2024-11-12T14:32:44,897 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T14:32:44,897 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:44,897 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T14:32:44,898 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. after waiting 0 ms 2024-11-12T14:32:44,898 DEBUG [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1351): Waiting on c7116e3c193693da4c76f7ac835cf1b6 2024-11-12T14:32:44,898 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T14:32:44,898 DEBUG [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T14:32:44,898 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:44,898 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T14:32:44,898 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-12T14:32:44,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_1073741827_1017 (size=93) 2024-11-12T14:32:44,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_1073741827_1017 (size=93) 2024-11-12T14:32:44,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_1073741827_1017 (size=93) 2024-11-12T14:32:44,910 DEBUG [RS:0;2b6d221c5cde:40627 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/oldWALs 2024-11-12T14:32:44,910 INFO [RS:0;2b6d221c5cde:40627 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2b6d221c5cde%2C40627%2C1731421958890:(num 1731421960502) 2024-11-12T14:32:44,910 DEBUG [RS:0;2b6d221c5cde:40627 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:44,910 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:44,910 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T14:32:44,911 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.ChoreService(370): Chore service for: regionserver/2b6d221c5cde:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T14:32:44,911 INFO 
[RS:0;2b6d221c5cde:40627 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T14:32:44,911 INFO [regionserver/2b6d221c5cde:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T14:32:44,911 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T14:32:44,911 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T14:32:44,911 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T14:32:44,911 INFO [RS:0;2b6d221c5cde:40627 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40627 2024-11-12T14:32:44,912 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/default/TestHBaseWalOnEC/c7116e3c193693da4c76f7ac835cf1b6/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-12T14:32:44,915 INFO [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:44,915 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c7116e3c193693da4c76f7ac835cf1b6: Waiting for close lock at 1731421964897Running coprocessor pre-close hooks at 1731421964897Disabling compacts and flushes for region at 1731421964897Disabling writes for close at 1731421964898 (+1 ms)Writing region close event to WAL at 1731421964899 (+1 ms)Running coprocessor post-close hooks at 1731421964913 (+14 ms)Closed at 1731421964915 (+2 ms) 2024-11-12T14:32:44,915 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6. 2024-11-12T14:32:44,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T14:32:44,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2b6d221c5cde,40627,1731421958890 2024-11-12T14:32:44,925 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T14:32:44,926 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2b6d221c5cde,40627,1731421958890] 2024-11-12T14:32:44,935 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/info/70597d66972147dc98112238ba92a9d7 is 153, key is TestHBaseWalOnEC,,1731421961452.c7116e3c193693da4c76f7ac835cf1b6./info:regioninfo/1731421962290/Put/seqid=0 2024-11-12T14:32:44,938 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:44,938 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:44,943 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1347911687_22 at /127.0.0.1:48074 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:34979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48074 dst: /127.0.0.1:34979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:44,947 INFO [regionserver/2b6d221c5cde:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:44,947 INFO [regionserver/2b6d221c5cde:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:44,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-12T14:32:44,947 INFO [regionserver/2b6d221c5cde:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:44,948 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2b6d221c5cde,40627,1731421958890 already deleted, retry=false 2024-11-12T14:32:44,948 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2b6d221c5cde,40627,1731421958890 expired; onlineServers=2 2024-11-12T14:32:44,948 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:44,948 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/info/70597d66972147dc98112238ba92a9d7 2024-11-12T14:32:44,976 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/ns/d96f597ea6eb442db36a7bb2356eab34 is 43, key is default/ns:d/1731421961184/Put/seqid=0 2024-11-12T14:32:44,978 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:44,978 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:44,983 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1347911687_22 at /127.0.0.1:36130 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:45343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36130 dst: /127.0.0.1:45343 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:44,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-12T14:32:44,987 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:44,988 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/ns/d96f597ea6eb442db36a7bb2356eab34 2024-11-12T14:32:45,013 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/table/b0691d7df3294c99a6dfc8589430f4a7 is 52, key is TestHBaseWalOnEC/table:state/1731421962304/Put/seqid=0 2024-11-12T14:32:45,015 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:45,015 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:45,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1347911687_22 at /127.0.0.1:48110 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:34979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48110 dst: /127.0.0.1:34979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:45,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-12T14:32:45,022 WARN [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-11-12T14:32:45,022 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/table/b0691d7df3294c99a6dfc8589430f4a7 2024-11-12T14:32:45,033 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/info/70597d66972147dc98112238ba92a9d7 as hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/info/70597d66972147dc98112238ba92a9d7 2024-11-12T14:32:45,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:45,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40627-0x1012f7668900001, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:45,038 INFO [RS:0;2b6d221c5cde:40627 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T14:32:45,039 INFO [RS:0;2b6d221c5cde:40627 {}] regionserver.HRegionServer(1031): Exiting; stopping=2b6d221c5cde,40627,1731421958890; zookeeper connection closed. 2024-11-12T14:32:45,039 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3443de87 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3443de87 2024-11-12T14:32:45,045 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/info/70597d66972147dc98112238ba92a9d7, entries=10, sequenceid=11, filesize=6.5 K 2024-11-12T14:32:45,047 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/ns/d96f597ea6eb442db36a7bb2356eab34 as hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/ns/d96f597ea6eb442db36a7bb2356eab34 2024-11-12T14:32:45,058 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/ns/d96f597ea6eb442db36a7bb2356eab34, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T14:32:45,060 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/.tmp/table/b0691d7df3294c99a6dfc8589430f4a7 as hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/table/b0691d7df3294c99a6dfc8589430f4a7 2024-11-12T14:32:45,071 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/table/b0691d7df3294c99a6dfc8589430f4a7, entries=2, sequenceid=11, filesize=5.1 K 2024-11-12T14:32:45,073 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 175ms, sequenceid=11, compaction requested=false 2024-11-12T14:32:45,073 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-12T14:32:45,085 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T14:32:45,086 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T14:32:45,086 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T14:32:45,086 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731421964897Running coprocessor pre-close hooks at 1731421964897Disabling compacts and flushes for region at 1731421964897Disabling writes for close at 1731421964898 (+1 ms)Obtaining lock to block concurrent updates at 1731421964898Preparing flush snapshotting stores in 1588230740 at 1731421964898Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731421964899 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731421964900 (+1 ms)Flushing 1588230740/info: creating writer at 1731421964900Flushing 1588230740/info: appending metadata at 1731421964930 (+30 ms)Flushing 1588230740/info: closing flushed file at 1731421964930Flushing 1588230740/ns: creating writer at 1731421964959 (+29 ms)Flushing 1588230740/ns: appending metadata at 1731421964975 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731421964975Flushing 1588230740/table: creating writer at 1731421964996 (+21 ms)Flushing 1588230740/table: appending metadata at 1731421965012 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731421965012Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69f879ab: reopening flushed file at 1731421965032 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1495facc: reopening flushed file at 1731421965045 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23f9b3fe: reopening flushed file at 1731421965058 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 175ms, sequenceid=11, compaction requested=false at 1731421965073 (+15 ms)Writing region close event to WAL at 1731421965075 (+2 ms)Running coprocessor post-close hooks at 1731421965086 (+11 ms)Closed at 1731421965086 2024-11-12T14:32:45,087 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 
{event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T14:32:45,098 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(976): stopping server 2b6d221c5cde,39331,1731421958988; all regions closed. 2024-11-12T14:32:45,098 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(976): stopping server 2b6d221c5cde,36673,1731421959037; all regions closed. 2024-11-12T14:32:45,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_1073741828_1018 (size=1298) 2024-11-12T14:32:45,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_1073741828_1018 (size=1298) 2024-11-12T14:32:45,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_1073741829_1019 (size=2751) 2024-11-12T14:32:45,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_1073741829_1019 (size=2751) 2024-11-12T14:32:45,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_1073741828_1018 (size=1298) 2024-11-12T14:32:45,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_1073741829_1019 (size=2751) 2024-11-12T14:32:45,106 DEBUG [RS:1;2b6d221c5cde:39331 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/oldWALs 2024-11-12T14:32:45,106 INFO [RS:1;2b6d221c5cde:39331 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2b6d221c5cde%2C39331%2C1731421958988:(num 1731421960502) 2024-11-12T14:32:45,106 DEBUG [RS:1;2b6d221c5cde:39331 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:45,106 DEBUG [RS:2;2b6d221c5cde:36673 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/oldWALs 2024-11-12T14:32:45,106 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:45,106 INFO [RS:2;2b6d221c5cde:36673 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2b6d221c5cde%2C36673%2C1731421959037.meta:.meta(num 1731421960985) 2024-11-12T14:32:45,106 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T14:32:45,106 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.ChoreService(370): Chore service for: regionserver/2b6d221c5cde:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T14:32:45,106 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T14:32:45,107 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T14:32:45,107 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T14:32:45,107 INFO [regionserver/2b6d221c5cde:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T14:32:45,107 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T14:32:45,107 INFO [RS:1;2b6d221c5cde:39331 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39331 2024-11-12T14:32:45,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_1073741826_1016 (size=93) 2024-11-12T14:32:45,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_1073741826_1016 (size=93) 2024-11-12T14:32:45,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_1073741826_1016 (size=93) 2024-11-12T14:32:45,113 DEBUG [RS:2;2b6d221c5cde:36673 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/oldWALs 2024-11-12T14:32:45,113 INFO [RS:2;2b6d221c5cde:36673 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 2b6d221c5cde%2C36673%2C1731421959037:(num 1731421960502) 2024-11-12T14:32:45,113 DEBUG [RS:2;2b6d221c5cde:36673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:45,113 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:45,113 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T14:32:45,113 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.ChoreService(370): Chore service for: regionserver/2b6d221c5cde:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T14:32:45,113 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T14:32:45,113 INFO [regionserver/2b6d221c5cde:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T14:32:45,114 INFO [RS:2;2b6d221c5cde:36673 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36673 2024-11-12T14:32:45,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2b6d221c5cde,39331,1731421958988 2024-11-12T14:32:45,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T14:32:45,116 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T14:32:45,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2b6d221c5cde,36673,1731421959037 2024-11-12T14:32:45,127 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T14:32:45,138 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2b6d221c5cde,39331,1731421958988] 2024-11-12T14:32:45,159 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2b6d221c5cde,39331,1731421958988 already deleted, retry=false 2024-11-12T14:32:45,159 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2b6d221c5cde,39331,1731421958988 expired; onlineServers=1 2024-11-12T14:32:45,159 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2b6d221c5cde,36673,1731421959037] 2024-11-12T14:32:45,169 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2b6d221c5cde,36673,1731421959037 already deleted, retry=false 2024-11-12T14:32:45,169 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2b6d221c5cde,36673,1731421959037 expired; onlineServers=0 2024-11-12T14:32:45,170 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2b6d221c5cde,33149,1731421958123' ***** 2024-11-12T14:32:45,170 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T14:32:45,170 INFO [M:0;2b6d221c5cde:33149 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T14:32:45,170 INFO [M:0;2b6d221c5cde:33149 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T14:32:45,171 DEBUG [M:0;2b6d221c5cde:33149 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T14:32:45,171 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T14:32:45,171 DEBUG [M:0;2b6d221c5cde:33149 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T14:32:45,171 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.large.0-1731421960154 {}] cleaner.HFileCleaner(306): Exit Thread[master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.large.0-1731421960154,5,FailOnTimeoutGroup] 2024-11-12T14:32:45,171 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.small.0-1731421960160 {}] cleaner.HFileCleaner(306): Exit Thread[master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.small.0-1731421960160,5,FailOnTimeoutGroup] 2024-11-12T14:32:45,172 INFO [M:0;2b6d221c5cde:33149 {}] hbase.ChoreService(370): Chore service for: master/2b6d221c5cde:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T14:32:45,172 INFO [M:0;2b6d221c5cde:33149 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T14:32:45,172 DEBUG [M:0;2b6d221c5cde:33149 {}] master.HMaster(1795): Stopping service threads 2024-11-12T14:32:45,173 INFO [M:0;2b6d221c5cde:33149 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T14:32:45,173 INFO [M:0;2b6d221c5cde:33149 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T14:32:45,174 INFO [M:0;2b6d221c5cde:33149 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T14:32:45,175 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T14:32:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T14:32:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:45,180 DEBUG [M:0;2b6d221c5cde:33149 {}] zookeeper.ZKUtil(347): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T14:32:45,181 WARN [M:0;2b6d221c5cde:33149 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T14:32:45,182 INFO [M:0;2b6d221c5cde:33149 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/.lastflushedseqids 2024-11-12T14:32:45,193 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:45,193 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T14:32:45,195 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:40358 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39633:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40358 dst: /127.0.0.1:39633 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:45,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-12T14:32:45,200 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T14:32:45,200 INFO [M:0;2b6d221c5cde:33149 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T14:32:45,200 INFO [M:0;2b6d221c5cde:33149 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T14:32:45,200 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T14:32:45,200 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:45,200 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:45,200 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T14:32:45,201 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T14:32:45,201 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-12T14:32:45,220 DEBUG [M:0;2b6d221c5cde:33149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dcd9f95b12ad4e7789be85dec2e798f5 is 82, key is hbase:meta,,1/info:regioninfo/1731421961066/Put/seqid=0 2024-11-12T14:32:45,222 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:45,222 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:45,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:48126 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:34979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48126 dst: /127.0.0.1:34979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:45,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-12T14:32:45,230 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:45,230 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dcd9f95b12ad4e7789be85dec2e798f5 2024-11-12T14:32:45,238 INFO [RS:1;2b6d221c5cde:39331 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T14:32:45,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:45,238 INFO [RS:1;2b6d221c5cde:39331 {}] regionserver.HRegionServer(1031): Exiting; stopping=2b6d221c5cde,39331,1731421958988; zookeeper connection closed. 2024-11-12T14:32:45,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39331-0x1012f7668900002, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:45,238 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2cfacaac {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2cfacaac 2024-11-12T14:32:45,248 INFO [RS:2;2b6d221c5cde:36673 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T14:32:45,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:45,248 INFO [RS:2;2b6d221c5cde:36673 {}] regionserver.HRegionServer(1031): Exiting; stopping=2b6d221c5cde,36673,1731421959037; zookeeper connection closed. 2024-11-12T14:32:45,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36673-0x1012f7668900003, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:45,249 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@17b16333 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@17b16333 2024-11-12T14:32:45,249 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-12T14:32:45,254 DEBUG [M:0;2b6d221c5cde:33149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335c503f31ec45c29d49a9adef80c6a9 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731421962310/Put/seqid=0 2024-11-12T14:32:45,257 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:45,257 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T14:32:45,259 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:48156 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:34979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48156 dst: /127.0.0.1:34979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:45,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775552_1037 (size=6440) 2024-11-12T14:32:45,264 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-12T14:32:45,264 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335c503f31ec45c29d49a9adef80c6a9 2024-11-12T14:32:45,289 DEBUG [M:0;2b6d221c5cde:33149 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac8242b26c9048e6816028c221743374 is 69, key is 2b6d221c5cde,36673,1731421959037/rs:state/1731421960193/Put/seqid=0 2024-11-12T14:32:45,290 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-12T14:32:45,291 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-12T14:32:45,293 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016053447_22 at /127.0.0.1:48176 [Receiving block BP-1644289805-172.17.0.3-1731421953022:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:34979:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48176 dst: /127.0.0.1:34979 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-12T14:32:45,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-12T14:32:45,298 WARN [M:0;2b6d221c5cde:33149 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-12T14:32:45,298 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac8242b26c9048e6816028c221743374 2024-11-12T14:32:45,307 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dcd9f95b12ad4e7789be85dec2e798f5 as hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dcd9f95b12ad4e7789be85dec2e798f5 2024-11-12T14:32:45,316 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dcd9f95b12ad4e7789be85dec2e798f5, entries=8, sequenceid=72, filesize=5.5 K 2024-11-12T14:32:45,318 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/335c503f31ec45c29d49a9adef80c6a9 as hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/335c503f31ec45c29d49a9adef80c6a9 2024-11-12T14:32:45,327 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/335c503f31ec45c29d49a9adef80c6a9, entries=8, sequenceid=72, filesize=6.3 K 2024-11-12T14:32:45,329 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac8242b26c9048e6816028c221743374 as hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac8242b26c9048e6816028c221743374 2024-11-12T14:32:45,337 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac8242b26c9048e6816028c221743374, entries=3, sequenceid=72, filesize=5.2 K 2024-11-12T14:32:45,338 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=72, compaction requested=false 2024-11-12T14:32:45,340 INFO [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T14:32:45,340 DEBUG [M:0;2b6d221c5cde:33149 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731421965200Disabling compacts and flushes for region at 1731421965200Disabling writes for close at 1731421965201 (+1 ms)Obtaining lock to block concurrent updates at 1731421965201Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731421965201Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731421965202 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731421965202Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731421965203 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731421965219 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731421965219Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731421965238 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731421965254 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731421965254Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731421965273 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731421965288 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731421965288Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@144ce830: reopening flushed file at 1731421965306 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b658595: reopening flushed file at 1731421965317 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6807db6: reopening flushed file at 1731421965327 (+10 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=72, compaction requested=false at 1731421965338 (+11 ms)Writing region close event to WAL at 1731421965339 (+1 ms)Closed at 1731421965340 (+1 ms) 2024-11-12T14:32:45,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39633 is added to blk_1073741825_1011 (size=32683) 2024-11-12T14:32:45,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45343 is added to blk_1073741825_1011 (size=32683) 2024-11-12T14:32:45,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34979 is added to blk_1073741825_1011 (size=32683) 2024-11-12T14:32:45,344 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T14:32:45,344 INFO [M:0;2b6d221c5cde:33149 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-12T14:32:45,344 INFO [M:0;2b6d221c5cde:33149 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33149 2024-11-12T14:32:45,344 INFO [M:0;2b6d221c5cde:33149 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T14:32:45,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:45,459 INFO [M:0;2b6d221c5cde:33149 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T14:32:45,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33149-0x1012f7668900000, quorum=127.0.0.1:56395, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:45,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:45,510 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T14:32:45,510 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T14:32:45,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T14:32:45,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir/,STOPPED} 2024-11-12T14:32:45,515 WARN [BP-1644289805-172.17.0.3-1731421953022 heartbeating to localhost/127.0.0.1:39957 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T14:32:45,515 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T14:32:45,515 WARN [BP-1644289805-172.17.0.3-1731421953022 heartbeating to localhost/127.0.0.1:39957 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1644289805-172.17.0.3-1731421953022 (Datanode Uuid fdf1fafe-0af1-45e8-8b36-cbdef7e8d07e) service to localhost/127.0.0.1:39957 2024-11-12T14:32:45,515 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T14:32:45,517 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data5/current/BP-1644289805-172.17.0.3-1731421953022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T14:32:45,517 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data6/current/BP-1644289805-172.17.0.3-1731421953022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T14:32:45,518 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T14:32:45,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:45,556 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T14:32:45,556 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T14:32:45,556 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T14:32:45,556 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir/,STOPPED} 2024-11-12T14:32:45,560 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T14:32:45,560 WARN [BP-1644289805-172.17.0.3-1731421953022 heartbeating to localhost/127.0.0.1:39957 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T14:32:45,560 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T14:32:45,560 WARN [BP-1644289805-172.17.0.3-1731421953022 heartbeating to localhost/127.0.0.1:39957 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1644289805-172.17.0.3-1731421953022 (Datanode Uuid 13a1360e-497b-4c69-900e-f4c0265385c2) service to localhost/127.0.0.1:39957 2024-11-12T14:32:45,561 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data3/current/BP-1644289805-172.17.0.3-1731421953022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T14:32:45,562 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data4/current/BP-1644289805-172.17.0.3-1731421953022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T14:32:45,562 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T14:32:45,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:45,566 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T14:32:45,566 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T14:32:45,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T14:32:45,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir/,STOPPED} 2024-11-12T14:32:45,568 WARN [BP-1644289805-172.17.0.3-1731421953022 heartbeating to localhost/127.0.0.1:39957 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-12T14:32:45,568 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-12T14:32:45,568 WARN [BP-1644289805-172.17.0.3-1731421953022 heartbeating to localhost/127.0.0.1:39957 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1644289805-172.17.0.3-1731421953022 (Datanode Uuid 34f4acec-26f1-426b-af11-e85dcfae0493) service to localhost/127.0.0.1:39957 2024-11-12T14:32:45,568 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-12T14:32:45,569 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data1/current/BP-1644289805-172.17.0.3-1731421953022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T14:32:45,569 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/cluster_743bddca-9325-4fa0-dfd2-bb76264b22df/data/data2/current/BP-1644289805-172.17.0.3-1731421953022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-12T14:32:45,570 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-12T14:32:45,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T14:32:45,581 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-12T14:32:45,581 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-12T14:32:45,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-12T14:32:45,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir/,STOPPED} 2024-11-12T14:32:45,589 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-12T14:32:45,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-12T14:32:45,622 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=87 (was 157), OpenFileDescriptor=447 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=149 (was 162), ProcessCount=11 (was 11), AvailableMemoryMB=7406 (was 7724) 2024-11-12T14:32:45,628 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=87, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=149, ProcessCount=11, AvailableMemoryMB=7406 2024-11-12T14:32:45,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-12T14:32:45,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.log.dir so I do NOT create it in target/test-data/4414b9c4-e706-08f0-2176-8099566275dc 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3500c02b-a19d-7939-58e3-d3dd5c1fb3ef/hadoop.tmp.dir so I do NOT create it in target/test-data/4414b9c4-e706-08f0-2176-8099566275dc 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0, deleteOnExit=true 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/test.cache.data in system properties and HBase conf 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.tmp.dir in system properties and HBase conf 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir in system properties and HBase conf 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-12T14:32:45,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-12T14:32:45,629 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/nfs.dump.dir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/java.io.tmpdir in system properties and HBase conf 2024-11-12T14:32:45,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-12T14:32:45,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-12T14:32:45,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-12T14:32:45,998 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:46,003 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T14:32:46,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T14:32:46,005 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T14:32:46,005 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T14:32:46,006 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:46,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b7198f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir/,AVAILABLE} 2024-11-12T14:32:46,006 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@722f6ac4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T14:32:46,101 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55791d09{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/java.io.tmpdir/jetty-localhost-36671-hadoop-hdfs-3_4_1-tests_jar-_-any-12952560025470643416/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-12T14:32:46,101 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13d23832{HTTP/1.1, (http/1.1)}{localhost:36671} 2024-11-12T14:32:46,102 INFO [Time-limited test {}] server.Server(415): Started @14861ms 2024-11-12T14:32:46,376 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-12T14:32:46,382 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T14:32:46,412 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T14:32:46,412 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-12T14:32:46,420 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:46,424 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T14:32:46,424 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T14:32:46,424 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T14:32:46,424 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-12T14:32:46,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f50f857{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir/,AVAILABLE} 2024-11-12T14:32:46,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@190e176c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T14:32:46,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16a06885{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/java.io.tmpdir/jetty-localhost-39589-hadoop-hdfs-3_4_1-tests_jar-_-any-6468195371027395001/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:46,518 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5921dad7{HTTP/1.1, (http/1.1)}{localhost:39589} 2024-11-12T14:32:46,518 INFO [Time-limited test {}] server.Server(415): Started @15278ms 2024-11-12T14:32:46,519 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T14:32:46,553 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:46,556 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T14:32:46,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T14:32:46,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T14:32:46,557 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T14:32:46,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@699139bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir/,AVAILABLE} 2024-11-12T14:32:46,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76d2c786{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T14:32:46,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6861a8ff{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/java.io.tmpdir/jetty-localhost-45417-hadoop-hdfs-3_4_1-tests_jar-_-any-9240892618756728608/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:46,652 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@618a6a3f{HTTP/1.1, (http/1.1)}{localhost:45417} 2024-11-12T14:32:46,653 INFO [Time-limited test {}] server.Server(415): Started @15412ms 2024-11-12T14:32:46,654 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T14:32:46,679 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-12T14:32:46,682 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-12T14:32:46,683 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-12T14:32:46,683 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-12T14:32:46,683 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-12T14:32:46,684 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4578d1ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir/,AVAILABLE} 2024-11-12T14:32:46,684 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@789867e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-12T14:32:46,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51ce3d64{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/java.io.tmpdir/jetty-localhost-40787-hadoop-hdfs-3_4_1-tests_jar-_-any-1412262851940666339/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-12T14:32:46,780 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@516c494d{HTTP/1.1, (http/1.1)}{localhost:40787} 2024-11-12T14:32:46,780 INFO [Time-limited test {}] server.Server(415): Started @15540ms 2024-11-12T14:32:46,784 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-12T14:32:48,460 WARN [Thread-567 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data1/current/BP-1959722420-172.17.0.3-1731421965654/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:48,460 WARN [Thread-568 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data2/current/BP-1959722420-172.17.0.3-1731421965654/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:48,477 WARN [Thread-507 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T14:32:48,480 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x72e58b1f646300d8 with lease ID 0x7fe3e49a7c9c4d58: Processing first storage report for DS-2bfcf099-0560-4123-8780-66b2db71dded from datanode DatanodeRegistration(127.0.0.1:33161, datanodeUuid=d6e2f150-a4dc-4c96-bffc-b017dff097f3, infoPort=42421, infoSecurePort=0, ipcPort=46627, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654) 2024-11-12T14:32:48,480 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x72e58b1f646300d8 with lease ID 0x7fe3e49a7c9c4d58: from storage DS-2bfcf099-0560-4123-8780-66b2db71dded node DatanodeRegistration(127.0.0.1:33161, datanodeUuid=d6e2f150-a4dc-4c96-bffc-b017dff097f3, infoPort=42421, infoSecurePort=0, ipcPort=46627, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:48,480 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x72e58b1f646300d8 with lease ID 0x7fe3e49a7c9c4d58: Processing first storage report for DS-f30bb707-f217-4391-82c4-eaea53169d90 from datanode DatanodeRegistration(127.0.0.1:33161, datanodeUuid=d6e2f150-a4dc-4c96-bffc-b017dff097f3, infoPort=42421, infoSecurePort=0, ipcPort=46627, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654) 2024-11-12T14:32:48,480 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x72e58b1f646300d8 with lease ID 0x7fe3e49a7c9c4d58: from storage DS-f30bb707-f217-4391-82c4-eaea53169d90 node DatanodeRegistration(127.0.0.1:33161, datanodeUuid=d6e2f150-a4dc-4c96-bffc-b017dff097f3, infoPort=42421, infoSecurePort=0, ipcPort=46627, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:48,626 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data3/current/BP-1959722420-172.17.0.3-1731421965654/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:48,626 WARN [Thread-579 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data4/current/BP-1959722420-172.17.0.3-1731421965654/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:48,648 WARN [Thread-530 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T14:32:48,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ab69ee346c44388 with lease ID 0x7fe3e49a7c9c4d59: Processing first storage report for DS-6cfb2083-f4a4-4c6b-ae16-1d927f1e4a46 from datanode DatanodeRegistration(127.0.0.1:44961, datanodeUuid=17295c7d-9416-448b-b94a-a31774becaa1, infoPort=40497, infoSecurePort=0, ipcPort=45199, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654) 2024-11-12T14:32:48,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ab69ee346c44388 with lease ID 0x7fe3e49a7c9c4d59: from storage DS-6cfb2083-f4a4-4c6b-ae16-1d927f1e4a46 node DatanodeRegistration(127.0.0.1:44961, datanodeUuid=17295c7d-9416-448b-b94a-a31774becaa1, infoPort=40497, infoSecurePort=0, ipcPort=45199, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:48,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ab69ee346c44388 with lease ID 0x7fe3e49a7c9c4d59: Processing first storage report for DS-9ef60c1c-1485-4b01-b0fd-0a0e19f61a63 from datanode DatanodeRegistration(127.0.0.1:44961, datanodeUuid=17295c7d-9416-448b-b94a-a31774becaa1, infoPort=40497, infoSecurePort=0, ipcPort=45199, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654) 2024-11-12T14:32:48,652 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ab69ee346c44388 with lease ID 0x7fe3e49a7c9c4d59: from storage DS-9ef60c1c-1485-4b01-b0fd-0a0e19f61a63 node DatanodeRegistration(127.0.0.1:44961, datanodeUuid=17295c7d-9416-448b-b94a-a31774becaa1, infoPort=40497, infoSecurePort=0, ipcPort=45199, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:48,675 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data5/current/BP-1959722420-172.17.0.3-1731421965654/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:48,675 WARN [Thread-590 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data6/current/BP-1959722420-172.17.0.3-1731421965654/current, will proceed with Du for space computation calculation, 2024-11-12T14:32:48,697 WARN [Thread-552 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-12T14:32:48,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b88eac035858bd0 with lease ID 0x7fe3e49a7c9c4d5a: Processing first storage report for DS-24abf3ba-92a0-4f53-97e4-c06c1de8784c from datanode DatanodeRegistration(127.0.0.1:35021, datanodeUuid=28052f7f-8bed-46cc-b640-9aff8117a2b6, infoPort=40147, infoSecurePort=0, ipcPort=34949, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654) 2024-11-12T14:32:48,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b88eac035858bd0 with lease ID 0x7fe3e49a7c9c4d5a: from storage DS-24abf3ba-92a0-4f53-97e4-c06c1de8784c node DatanodeRegistration(127.0.0.1:35021, datanodeUuid=28052f7f-8bed-46cc-b640-9aff8117a2b6, infoPort=40147, infoSecurePort=0, ipcPort=34949, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:48,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b88eac035858bd0 with lease ID 0x7fe3e49a7c9c4d5a: Processing first storage report for DS-d514aacc-e79d-4180-bb8c-a1fbc4d16971 from datanode DatanodeRegistration(127.0.0.1:35021, datanodeUuid=28052f7f-8bed-46cc-b640-9aff8117a2b6, infoPort=40147, infoSecurePort=0, ipcPort=34949, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654) 2024-11-12T14:32:48,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b88eac035858bd0 with lease ID 0x7fe3e49a7c9c4d5a: from storage DS-d514aacc-e79d-4180-bb8c-a1fbc4d16971 node DatanodeRegistration(127.0.0.1:35021, datanodeUuid=28052f7f-8bed-46cc-b640-9aff8117a2b6, infoPort=40147, infoSecurePort=0, ipcPort=34949, storageInfo=lv=-57;cid=testClusterID;nsid=306499710;c=1731421965654), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-12T14:32:48,731 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc 2024-11-12T14:32:48,734 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/zookeeper_0, clientPort=61410, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-12T14:32:48,736 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61410 2024-11-12T14:32:48,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:48,738 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:48,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741825_1001 (size=7) 2024-11-12T14:32:48,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741825_1001 (size=7) 2024-11-12T14:32:48,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741825_1001 (size=7) 2024-11-12T14:32:48,754 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388 with version=8 2024-11-12T14:32:48,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39957/user/jenkins/test-data/576cc502-ad6b-eaf4-e45d-71f5824f4ef0/hbase-staging 2024-11-12T14:32:48,757 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2b6d221c5cde:0 server-side Connection retries=45 2024-11-12T14:32:48,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,757 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T14:32:48,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,757 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T14:32:48,757 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-12T14:32:48,757 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T14:32:48,758 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38757 2024-11-12T14:32:48,759 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38757 connecting to ZooKeeper ensemble=127.0.0.1:61410 2024-11-12T14:32:48,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:387570x0, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T14:32:48,810 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38757-0x1012f7695090000 connected 2024-11-12T14:32:48,896 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:48,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:48,904 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:48,904 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388, hbase.cluster.distributed=false 2024-11-12T14:32:48,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T14:32:48,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38757 2024-11-12T14:32:48,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38757 2024-11-12T14:32:48,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38757 2024-11-12T14:32:48,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38757 2024-11-12T14:32:48,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38757 2024-11-12T14:32:48,925 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2b6d221c5cde:0 server-side Connection retries=45 2024-11-12T14:32:48,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,926 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T14:32:48,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T14:32:48,926 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T14:32:48,926 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T14:32:48,927 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38471 2024-11-12T14:32:48,928 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38471 connecting to ZooKeeper ensemble=127.0.0.1:61410 2024-11-12T14:32:48,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:48,931 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:48,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384710x0, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T14:32:48,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38471-0x1012f7695090001 connected 2024-11-12T14:32:48,946 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:48,947 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T14:32:48,948 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T14:32:48,949 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T14:32:48,951 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T14:32:48,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38471 2024-11-12T14:32:48,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38471 2024-11-12T14:32:48,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38471 2024-11-12T14:32:48,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38471 2024-11-12T14:32:48,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38471 2024-11-12T14:32:48,972 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2b6d221c5cde:0 server-side Connection retries=45 2024-11-12T14:32:48,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,972 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T14:32:48,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:48,972 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T14:32:48,972 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T14:32:48,972 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T14:32:48,973 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45879 2024-11-12T14:32:48,974 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45879 connecting to ZooKeeper ensemble=127.0.0.1:61410 2024-11-12T14:32:48,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:48,976 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:48,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458790x0, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T14:32:48,990 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45879-0x1012f7695090002 connected 2024-11-12T14:32:48,990 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:48,991 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T14:32:48,992 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T14:32:48,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T14:32:48,994 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T14:32:48,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45879 2024-11-12T14:32:48,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45879 2024-11-12T14:32:48,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45879 2024-11-12T14:32:48,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45879 2024-11-12T14:32:48,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45879 2024-11-12T14:32:49,013 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2b6d221c5cde:0 server-side Connection retries=45 2024-11-12T14:32:49,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:49,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:49,014 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-12T14:32:49,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-12T14:32:49,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-12T14:32:49,014 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-12T14:32:49,014 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-12T14:32:49,015 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33709 2024-11-12T14:32:49,016 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33709 connecting to ZooKeeper ensemble=127.0.0.1:61410 2024-11-12T14:32:49,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:49,018 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:49,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337090x0, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-12T14:32:49,033 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:337090x0, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:49,033 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33709-0x1012f7695090003 connected 2024-11-12T14:32:49,033 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-12T14:32:49,034 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-12T14:32:49,034 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-12T14:32:49,035 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-12T14:32:49,036 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33709 2024-11-12T14:32:49,036 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33709 2024-11-12T14:32:49,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33709 2024-11-12T14:32:49,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33709 2024-11-12T14:32:49,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33709 2024-11-12T14:32:49,052 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2b6d221c5cde:38757 2024-11-12T14:32:49,052 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:49,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:49,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:49,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:49,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:49,064 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:49,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T14:32:49,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T14:32:49,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-12T14:32:49,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,075 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-12T14:32:49,075 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2b6d221c5cde,38757,1731421968756 from backup master directory 2024-11-12T14:32:49,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:49,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:49,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:49,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:49,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-12T14:32:49,085 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
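[Annotation] The repeated ZKUtil(113) records above show each region server registering a watch on znodes such as /hbase/running, /hbase/master and /hbase/acl before those znodes exist, so the later NodeCreated events in this log can fire. Outside HBase's ZKUtil wrapper, the same effect comes from the stock ZooKeeper client: an exists() call sets the watch even when the node is absent. A minimal sketch against the plain org.apache.zookeeper API (quorum string and path taken from the log; the callback body is illustrative only, and a real program would stay alive to receive the event):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class WatchMissingZnode {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            // Connect to the test ensemble from the log.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:61410", 30000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();

            // exists() registers a one-shot watch even though the znode is not there yet,
            // mirroring "Set watcher on znode that does not yet exist, /hbase/running".
            zk.exists("/hbase/running", event -> {
                if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                    System.out.println("NodeCreated: " + event.getPath());
                }
            });
        }
    }
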
2024-11-12T14:32:49,085 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:49,091 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/hbase.id] with ID: 0aabd171-3d21-43f8-bd2d-700326e10a6f 2024-11-12T14:32:49,091 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/.tmp/hbase.id 2024-11-12T14:32:49,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741826_1002 (size=42) 2024-11-12T14:32:49,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741826_1002 (size=42) 2024-11-12T14:32:49,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741826_1002 (size=42) 2024-11-12T14:32:49,102 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/.tmp/hbase.id]:[hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/hbase.id] 2024-11-12T14:32:49,117 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-12T14:32:49,117 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-12T14:32:49,119 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
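[Annotation] The FSUtils(620/625/634) records above create hbase.id by writing the cluster ID to a temporary path and then moving it onto the final path, so readers never see a half-written file. The same write-then-rename pattern with the plain Hadoop FileSystem API looks roughly like this (paths are shortened and the helper name is made up; only the cluster ID value comes from the log):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFile {
        // Write the content to tmpFile first, then rename onto finalFile.
        static void writeAtomically(FileSystem fs, Path tmpFile, Path finalFile, String content)
                throws java.io.IOException {
            try (FSDataOutputStream out = fs.create(tmpFile, true)) {
                out.write(content.getBytes(StandardCharsets.UTF_8));
            }
            if (!fs.rename(tmpFile, finalFile)) {
                throw new java.io.IOException("rename failed: " + tmpFile + " -> " + finalFile);
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:35177"), conf);
            writeAtomically(fs,
                new Path("/user/jenkins/test-data/.tmp/hbase.id"),   // temporary location
                new Path("/user/jenkins/test-data/hbase.id"),        // final location
                "0aabd171-3d21-43f8-bd2d-700326e10a6f");             // cluster ID from the log
        }
    }
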
2024-11-12T14:32:49,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741827_1003 (size=196) 2024-11-12T14:32:49,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741827_1003 (size=196) 2024-11-12T14:32:49,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741827_1003 (size=196) 2024-11-12T14:32:49,138 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T14:32:49,139 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-12T14:32:49,139 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T14:32:49,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is 
added to blk_1073741828_1004 (size=1189) 2024-11-12T14:32:49,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741828_1004 (size=1189) 2024-11-12T14:32:49,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741828_1004 (size=1189) 2024-11-12T14:32:49,153 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store 2024-11-12T14:32:49,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741829_1005 (size=34) 2024-11-12T14:32:49,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741829_1005 (size=34) 2024-11-12T14:32:49,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741829_1005 (size=34) 2024-11-12T14:32:49,164 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:49,164 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T14:32:49,164 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:49,164 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
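[Annotation] The MasterRegion(370)/HRegion(7590) records above print the full descriptor of the local 'master:store' table: four column families (info, proc, rs, state) with the versions, bloom filters, block sizes and encodings shown. As a rough illustration of how a descriptor with those attributes is assembled with the public HBase 2.x client builders (this is not the MasterRegion code itself; the table name is a made-up example, only two of the four families are shown, and the builder method names are as I recall them):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
        static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example:store"))
                // 'info' family as logged: 3 versions, ROWCOL bloom, ROW_INDEX_V1
                // encoding, in-memory, 8 KB blocks.
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .build())
                // 'proc' family as logged: 1 version, ROW bloom, no encoding, 64 KB blocks.
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                    .setMaxVersions(1)
                    .setBloomFilterType(BloomType.ROW)
                    .setBlocksize(64 * 1024)
                    .build())
                .build();
        }
    }
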
2024-11-12T14:32:49,164 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T14:32:49,164 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:49,164 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:49,164 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731421969164Disabling compacts and flushes for region at 1731421969164Disabling writes for close at 1731421969164Writing region close event to WAL at 1731421969164Closed at 1731421969164 2024-11-12T14:32:49,165 WARN [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/.initializing 2024-11-12T14:32:49,165 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/WALs/2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:49,169 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C38757%2C1731421968756, suffix=, logDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/WALs/2b6d221c5cde,38757,1731421968756, archiveDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/oldWALs, maxLogs=10 2024-11-12T14:32:49,170 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2b6d221c5cde%2C38757%2C1731421968756.1731421969169 2024-11-12T14:32:49,179 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/WALs/2b6d221c5cde,38757,1731421968756/2b6d221c5cde%2C38757%2C1731421968756.1731421969169 2024-11-12T14:32:49,183 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:42421:42421),(127.0.0.1/127.0.0.1:40147:40147)] 2024-11-12T14:32:49,183 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-12T14:32:49,183 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:49,184 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,184 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-12T14:32:49,187 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:49,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-12T14:32:49,189 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T14:32:49,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-12T14:32:49,192 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T14:32:49,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-12T14:32:49,195 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T14:32:49,196 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,197 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,198 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,199 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,199 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,200 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T14:32:49,201 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-12T14:32:49,204 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T14:32:49,204 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69211198, jitterRate=0.03132721781730652}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T14:32:49,205 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731421969184Initializing all the Stores at 1731421969185 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421969185Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421969185Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421969185Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421969185Cleaning up temporary data from old regions at 1731421969199 (+14 ms)Region opened successfully at 1731421969205 (+6 ms) 2024-11-12T14:32:49,206 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-12T14:32:49,211 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8fb898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b6d221c5cde/172.17.0.3:0 2024-11-12T14:32:49,212 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-12T14:32:49,212 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-12T14:32:49,212 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-12T14:32:49,212 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-12T14:32:49,213 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-12T14:32:49,214 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-12T14:32:49,214 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-12T14:32:49,217 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
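[Annotation] The FlushLargeStoresPolicy(65) record above falls back to region.getMemStoreFlushHeapSize divided by the number of column families because no explicit lower bound is configured: with the 128 MB flush size injected earlier (flushSize=134217728) and the four families of master:store, that is 134217728 / 4 = 33554432 bytes, matching the flushSizeLowerBound=33554432 printed when the region opened. A tiny sketch of that fallback computation (the names below are illustrative, not the HBase method names):

    public class FlushLowerBoundSketch {
        // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset:
        // divide the region's memstore flush size evenly across its column families.
        static long flushSizeLowerBound(long memStoreFlushSize, int familyCount) {
            return memStoreFlushSize / familyCount;
        }

        public static void main(String[] args) {
            // 134217728 / 4 = 33554432 (32 MB), matching the logged flushSizeLowerBound.
            System.out.println(flushSizeLowerBound(134_217_728L, 4));
        }
    }
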
2024-11-12T14:32:49,218 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-12T14:32:49,230 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-12T14:32:49,230 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-12T14:32:49,231 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-12T14:32:49,242 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-12T14:32:49,243 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-12T14:32:49,244 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-12T14:32:49,253 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-12T14:32:49,254 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-12T14:32:49,263 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-12T14:32:49,266 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-12T14:32:49,274 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-12T14:32:49,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:49,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:49,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-12T14:32:49,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:49,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:49,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,287 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2b6d221c5cde,38757,1731421968756, sessionid=0x1012f7695090000, setting cluster-up flag (Was=false) 2024-11-12T14:32:49,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,337 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-12T14:32:49,340 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:49,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,390 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-12T14:32:49,391 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:49,393 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-12T14:32:49,396 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-12T14:32:49,396 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-12T14:32:49,396 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
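[Annotation] The RecoverableZooKeeper(212) records above ("Node /hbase/balancer already deleted, retry=false", and likewise for /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge and /hbase/snapshot-cleanup) come from deletes that treat an absent znode as success. With the plain ZooKeeper client, that tolerant delete is just a NoNodeException catch; the helper below is a sketch, not HBase's RecoverableZooKeeper:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class TolerantDelete {
        // Delete a znode at any version; treat "already gone" as success.
        static void deleteIfExists(ZooKeeper zk, String path)
                throws KeeperException, InterruptedException {
            try {
                zk.delete(path, -1);            // -1 = ignore version
            } catch (KeeperException.NoNodeException e) {
                // Node was never created or was already deleted; nothing to do.
            }
        }
    }
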
2024-11-12T14:32:49,396 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2b6d221c5cde,38757,1731421968756 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-12T14:32:49,398 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2b6d221c5cde:0, corePoolSize=5, maxPoolSize=5 2024-11-12T14:32:49,398 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2b6d221c5cde:0, corePoolSize=5, maxPoolSize=5 2024-11-12T14:32:49,398 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=5, maxPoolSize=5 2024-11-12T14:32:49,398 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=5, maxPoolSize=5 2024-11-12T14:32:49,398 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2b6d221c5cde:0, corePoolSize=10, maxPoolSize=10 2024-11-12T14:32:49,398 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,398 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=2, maxPoolSize=2 2024-11-12T14:32:49,398 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,399 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731421999399 2024-11-12T14:32:49,399 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-12T14:32:49,399 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-12T14:32:49,399 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-12T14:32:49,399 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-12T14:32:49,399 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-12T14:32:49,399 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-12T14:32:49,400 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,400 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-12T14:32:49,400 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-12T14:32:49,400 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-12T14:32:49,400 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T14:32:49,401 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-12T14:32:49,401 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-12T14:32:49,401 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-12T14:32:49,401 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.large.0-1731421969401,5,FailOnTimeoutGroup] 2024-11-12T14:32:49,403 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.small.0-1731421969401,5,FailOnTimeoutGroup] 2024-11-12T14:32:49,403 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,403 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,403 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-12T14:32:49,403 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,403 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
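[Annotation] The ChoreService(168) records above schedule the master's recurring maintenance tasks: LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms. Conceptually each chore is a named task run at a fixed period; a plain java.util.concurrent analogue (not HBase's ChoreService implementation) of the 10-minute LogsCleaner schedule looks like:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
            // Run a "LogsCleaner"-style task every 600000 ms (10 minutes).
            pool.scheduleAtFixedRate(
                () -> System.out.println("cleaning old WALs..."),
                0, 600_000, TimeUnit.MILLISECONDS);
        }
    }
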
2024-11-12T14:32:49,403 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-12T14:32:49,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741831_1007 (size=1321) 2024-11-12T14:32:49,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741831_1007 (size=1321) 2024-11-12T14:32:49,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741831_1007 (size=1321) 2024-11-12T14:32:49,413 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-12T14:32:49,414 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388 2024-11-12T14:32:49,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741832_1008 (size=32) 2024-11-12T14:32:49,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741832_1008 (size=32) 2024-11-12T14:32:49,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741832_1008 (size=32) 2024-11-12T14:32:49,423 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:49,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T14:32:49,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T14:32:49,426 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,426 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:49,426 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T14:32:49,428 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; 
throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T14:32:49,428 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:49,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T14:32:49,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T14:32:49,430 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:49,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T14:32:49,433 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, 
region 1588230740 columnFamilyName table 2024-11-12T14:32:49,433 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:49,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:49,434 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T14:32:49,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740 2024-11-12T14:32:49,436 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740 2024-11-12T14:32:49,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T14:32:49,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T14:32:49,438 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T14:32:49,440 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(746): ClusterId : 0aabd171-3d21-43f8-bd2d-700326e10a6f 2024-11-12T14:32:49,440 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(746): ClusterId : 0aabd171-3d21-43f8-bd2d-700326e10a6f 2024-11-12T14:32:49,440 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T14:32:49,440 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T14:32:49,440 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(746): ClusterId : 0aabd171-3d21-43f8-bd2d-700326e10a6f 2024-11-12T14:32:49,440 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-12T14:32:49,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T14:32:49,443 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T14:32:49,444 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67910976, jitterRate=0.011952400207519531}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T14:32:49,444 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731421969423Initializing all the Stores at 1731421969424 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421969424Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421969424Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421969424Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421969424Cleaning up temporary data from old regions at 1731421969437 (+13 ms)Region opened successfully at 1731421969444 (+7 ms) 2024-11-12T14:32:49,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T14:32:49,445 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T14:32:49,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T14:32:49,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T14:32:49,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T14:32:49,445 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T14:32:49,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731421969445Disabling compacts and flushes for region at 1731421969445Disabling writes for close at 1731421969445Writing region close event to WAL at 1731421969445Closed at 1731421969445 2024-11-12T14:32:49,447 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T14:32:49,447 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-12T14:32:49,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-12T14:32:49,450 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T14:32:49,452 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; 
state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-12T14:32:49,462 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T14:32:49,462 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T14:32:49,462 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T14:32:49,462 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T14:32:49,462 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-12T14:32:49,462 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-12T14:32:49,475 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T14:32:49,475 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T14:32:49,475 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-12T14:32:49,475 DEBUG [RS:2;2b6d221c5cde:33709 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c0b49a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b6d221c5cde/172.17.0.3:0 2024-11-12T14:32:49,475 DEBUG [RS:0;2b6d221c5cde:38471 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a4c784c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b6d221c5cde/172.17.0.3:0 2024-11-12T14:32:49,475 DEBUG [RS:1;2b6d221c5cde:45879 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@525a8609, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b6d221c5cde/172.17.0.3:0 2024-11-12T14:32:49,486 DEBUG [RS:2;2b6d221c5cde:33709 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;2b6d221c5cde:33709 2024-11-12T14:32:49,486 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T14:32:49,486 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T14:32:49,486 DEBUG [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T14:32:49,487 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(2659): reportForDuty to master=2b6d221c5cde,38757,1731421968756 with port=33709, startcode=1731421969013 2024-11-12T14:32:49,487 DEBUG [RS:2;2b6d221c5cde:33709 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T14:32:49,489 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53909, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T14:32:49,490 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38757 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:49,490 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38757 {}] master.ServerManager(517): Registering regionserver=2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:49,492 DEBUG [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388 2024-11-12T14:32:49,493 DEBUG [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35177 2024-11-12T14:32:49,493 DEBUG [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T14:32:49,494 DEBUG [RS:1;2b6d221c5cde:45879 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2b6d221c5cde:45879 2024-11-12T14:32:49,494 DEBUG [RS:0;2b6d221c5cde:38471 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2b6d221c5cde:38471 2024-11-12T14:32:49,494 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T14:32:49,494 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-12T14:32:49,494 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T14:32:49,494 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-12T14:32:49,494 DEBUG [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-12T14:32:49,494 DEBUG [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-12T14:32:49,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T14:32:49,506 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(2659): reportForDuty to master=2b6d221c5cde,38757,1731421968756 with port=45879, startcode=1731421968972 2024-11-12T14:32:49,506 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(2659): reportForDuty to master=2b6d221c5cde,38757,1731421968756 with port=38471, startcode=1731421968925 2024-11-12T14:32:49,506 DEBUG [RS:2;2b6d221c5cde:33709 {}] zookeeper.ZKUtil(111): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:49,506 WARN [RS:2;2b6d221c5cde:33709 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T14:32:49,506 DEBUG [RS:1;2b6d221c5cde:45879 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T14:32:49,506 DEBUG [RS:0;2b6d221c5cde:38471 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-12T14:32:49,506 INFO [RS:2;2b6d221c5cde:33709 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T14:32:49,506 DEBUG [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:49,506 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2b6d221c5cde,33709,1731421969013] 2024-11-12T14:32:49,508 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57459, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T14:32:49,508 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36887, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-12T14:32:49,509 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38757 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:49,509 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38757 {}] master.ServerManager(517): Registering regionserver=2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:49,511 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38757 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2b6d221c5cde,45879,1731421968972 2024-11-12T14:32:49,511 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T14:32:49,511 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38757 {}] master.ServerManager(517): Registering regionserver=2b6d221c5cde,45879,1731421968972 2024-11-12T14:32:49,511 DEBUG [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388 
2024-11-12T14:32:49,511 DEBUG [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35177 2024-11-12T14:32:49,511 DEBUG [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T14:32:49,514 DEBUG [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388 2024-11-12T14:32:49,514 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T14:32:49,514 DEBUG [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35177 2024-11-12T14:32:49,514 DEBUG [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-12T14:32:49,514 INFO [RS:2;2b6d221c5cde:33709 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T14:32:49,514 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,514 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T14:32:49,515 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T14:32:49,515 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2b6d221c5cde:0, corePoolSize=2, maxPoolSize=2 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,516 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,517 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,517 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:49,517 DEBUG [RS:2;2b6d221c5cde:33709 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:49,520 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,520 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,520 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,520 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:49,520 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,521 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,33709,1731421969013-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T14:32:49,533 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T14:32:49,533 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,33709,1731421969013-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,534 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,534 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.Replication(171): 2b6d221c5cde,33709,1731421969013 started 2024-11-12T14:32:49,546 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,546 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1482): Serving as 2b6d221c5cde,33709,1731421969013, RpcServer on 2b6d221c5cde/172.17.0.3:33709, sessionid=0x1012f7695090003 2024-11-12T14:32:49,546 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T14:32:49,546 DEBUG [RS:2;2b6d221c5cde:33709 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:49,546 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,33709,1731421969013' 2024-11-12T14:32:49,546 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T14:32:49,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T14:32:49,569 DEBUG [RS:0;2b6d221c5cde:38471 {}] zookeeper.ZKUtil(111): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:49,569 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T14:32:49,569 WARN [RS:0;2b6d221c5cde:38471 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-12T14:32:49,569 INFO [RS:0;2b6d221c5cde:38471 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T14:32:49,569 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2b6d221c5cde,45879,1731421968972] 2024-11-12T14:32:49,569 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2b6d221c5cde,38471,1731421968925] 2024-11-12T14:32:49,569 DEBUG [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:49,569 DEBUG [RS:1;2b6d221c5cde:45879 {}] zookeeper.ZKUtil(111): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2b6d221c5cde,45879,1731421968972 2024-11-12T14:32:49,570 WARN [RS:1;2b6d221c5cde:45879 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-12T14:32:49,570 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T14:32:49,570 INFO [RS:1;2b6d221c5cde:45879 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T14:32:49,570 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T14:32:49,570 DEBUG [RS:2;2b6d221c5cde:33709 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:49,570 DEBUG [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,45879,1731421968972 2024-11-12T14:32:49,570 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,33709,1731421969013' 2024-11-12T14:32:49,570 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T14:32:49,571 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T14:32:49,572 DEBUG [RS:2;2b6d221c5cde:33709 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T14:32:49,572 INFO [RS:2;2b6d221c5cde:33709 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T14:32:49,572 INFO [RS:2;2b6d221c5cde:33709 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-12T14:32:49,575 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T14:32:49,576 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-12T14:32:49,579 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T14:32:49,580 INFO [RS:0;2b6d221c5cde:38471 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T14:32:49,580 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,580 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T14:32:49,581 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T14:32:49,581 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-12T14:32:49,581 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,581 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,581 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,581 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,581 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,581 INFO [RS:1;2b6d221c5cde:45879 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,581 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2b6d221c5cde:0, corePoolSize=2, maxPoolSize=2 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:49,582 DEBUG [RS:0;2b6d221c5cde:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:49,584 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-12T14:32:49,584 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,584 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,585 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,585 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,585 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,585 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,38471,1731421968925-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T14:32:49,585 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-12T14:32:49,585 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:49,585 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,585 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,585 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2b6d221c5cde:0, corePoolSize=2, maxPoolSize=2 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2b6d221c5cde:0, corePoolSize=1, maxPoolSize=1 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:49,586 DEBUG [RS:1;2b6d221c5cde:45879 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0, corePoolSize=3, maxPoolSize=3 2024-11-12T14:32:49,589 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,589 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,589 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,589 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:49,589 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,589 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,45879,1731421968972-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T14:32:49,600 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T14:32:49,600 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,38471,1731421968925-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,600 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,600 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.Replication(171): 2b6d221c5cde,38471,1731421968925 started 2024-11-12T14:32:49,602 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-12T14:32:49,602 WARN [2b6d221c5cde:38757 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-12T14:32:49,602 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,45879,1731421968972-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,602 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:49,602 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.Replication(171): 2b6d221c5cde,45879,1731421968972 started 2024-11-12T14:32:49,613 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:49,613 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1482): Serving as 2b6d221c5cde,38471,1731421968925, RpcServer on 2b6d221c5cde/172.17.0.3:38471, sessionid=0x1012f7695090001 2024-11-12T14:32:49,613 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T14:32:49,613 DEBUG [RS:0;2b6d221c5cde:38471 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:49,613 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,38471,1731421968925' 2024-11-12T14:32:49,613 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T14:32:49,614 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T14:32:49,614 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T14:32:49,614 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T14:32:49,614 DEBUG [RS:0;2b6d221c5cde:38471 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:49,614 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,38471,1731421968925' 2024-11-12T14:32:49,614 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T14:32:49,614 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-12T14:32:49,615 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(1482): Serving as 2b6d221c5cde,45879,1731421968972, RpcServer on 2b6d221c5cde/172.17.0.3:45879, sessionid=0x1012f7695090002 2024-11-12T14:32:49,615 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-12T14:32:49,615 DEBUG [RS:1;2b6d221c5cde:45879 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2b6d221c5cde,45879,1731421968972 2024-11-12T14:32:49,615 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,45879,1731421968972' 2024-11-12T14:32:49,615 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-12T14:32:49,615 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T14:32:49,615 DEBUG [RS:0;2b6d221c5cde:38471 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T14:32:49,615 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-12T14:32:49,615 INFO [RS:0;2b6d221c5cde:38471 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T14:32:49,615 INFO [RS:0;2b6d221c5cde:38471 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-12T14:32:49,616 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-12T14:32:49,616 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-12T14:32:49,616 DEBUG [RS:1;2b6d221c5cde:45879 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2b6d221c5cde,45879,1731421968972 2024-11-12T14:32:49,616 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b6d221c5cde,45879,1731421968972' 2024-11-12T14:32:49,616 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-12T14:32:49,616 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-12T14:32:49,617 DEBUG [RS:1;2b6d221c5cde:45879 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-12T14:32:49,617 INFO [RS:1;2b6d221c5cde:45879 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-12T14:32:49,617 INFO [RS:1;2b6d221c5cde:45879 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-12T14:32:49,677 INFO [RS:2;2b6d221c5cde:33709 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C33709%2C1731421969013, suffix=, logDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,33709,1731421969013, archiveDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/oldWALs, maxLogs=32 2024-11-12T14:32:49,683 INFO [RS:2;2b6d221c5cde:33709 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2b6d221c5cde%2C33709%2C1731421969013.1731421969682 2024-11-12T14:32:49,692 INFO [RS:2;2b6d221c5cde:33709 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,33709,1731421969013/2b6d221c5cde%2C33709%2C1731421969013.1731421969682 2024-11-12T14:32:49,693 DEBUG [RS:2;2b6d221c5cde:33709 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42421:42421),(127.0.0.1/127.0.0.1:40147:40147),(127.0.0.1/127.0.0.1:40497:40497)] 2024-11-12T14:32:49,718 INFO [RS:0;2b6d221c5cde:38471 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C38471%2C1731421968925, suffix=, logDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,38471,1731421968925, archiveDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/oldWALs, maxLogs=32 2024-11-12T14:32:49,719 INFO [RS:1;2b6d221c5cde:45879 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C45879%2C1731421968972, suffix=, logDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,45879,1731421968972, archiveDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/oldWALs, maxLogs=32 2024-11-12T14:32:49,721 INFO [RS:0;2b6d221c5cde:38471 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2b6d221c5cde%2C38471%2C1731421968925.1731421969721 2024-11-12T14:32:49,721 INFO [RS:1;2b6d221c5cde:45879 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2b6d221c5cde%2C45879%2C1731421968972.1731421969721 2024-11-12T14:32:49,732 INFO [RS:1;2b6d221c5cde:45879 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,45879,1731421968972/2b6d221c5cde%2C45879%2C1731421968972.1731421969721 2024-11-12T14:32:49,733 DEBUG [RS:1;2b6d221c5cde:45879 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40147:40147),(127.0.0.1/127.0.0.1:42421:42421),(127.0.0.1/127.0.0.1:40497:40497)] 2024-11-12T14:32:49,733 INFO [RS:0;2b6d221c5cde:38471 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,38471,1731421968925/2b6d221c5cde%2C38471%2C1731421968925.1731421969721 2024-11-12T14:32:49,736 DEBUG [RS:0;2b6d221c5cde:38471 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:42421:42421),(127.0.0.1/127.0.0.1:40147:40147)] 2024-11-12T14:32:49,852 DEBUG [2b6d221c5cde:38757 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-12T14:32:49,852 DEBUG [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(204): Hosts are {2b6d221c5cde=0} racks are {/default-rack=0} 2024-11-12T14:32:49,855 DEBUG [2b6d221c5cde:38757 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T14:32:49,855 DEBUG [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T14:32:49,855 DEBUG [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T14:32:49,855 DEBUG [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T14:32:49,855 DEBUG [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T14:32:49,855 DEBUG [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T14:32:49,855 INFO [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T14:32:49,855 INFO [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T14:32:49,855 INFO [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T14:32:49,855 DEBUG [2b6d221c5cde:38757 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T14:32:49,856 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:49,857 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2b6d221c5cde,38471,1731421968925, state=OPENING 2024-11-12T14:32:49,903 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-12T14:32:49,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:49,918 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:49,918 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:49,919 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-12T14:32:49,919 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:49,919 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path 
/hbase/meta-region-server: CHANGED 2024-11-12T14:32:49,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2b6d221c5cde,38471,1731421968925}] 2024-11-12T14:32:50,076 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T14:32:50,079 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40317, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T14:32:50,086 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-12T14:32:50,087 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-12T14:32:50,089 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b6d221c5cde%2C38471%2C1731421968925.meta, suffix=.meta, logDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,38471,1731421968925, archiveDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/oldWALs, maxLogs=32 2024-11-12T14:32:50,091 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2b6d221c5cde%2C38471%2C1731421968925.meta.1731421970090.meta 2024-11-12T14:32:50,100 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/WALs/2b6d221c5cde,38471,1731421968925/2b6d221c5cde%2C38471%2C1731421968925.meta.1731421970090.meta 2024-11-12T14:32:50,103 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40147:40147),(127.0.0.1/127.0.0.1:42421:42421),(127.0.0.1/127.0.0.1:40497:40497)] 2024-11-12T14:32:50,108 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-12T14:32:50,109 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-12T14:32:50,109 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-12T14:32:50,109 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-12T14:32:50,109 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-12T14:32:50,109 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:50,109 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-12T14:32:50,109 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-12T14:32:50,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-12T14:32:50,112 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-12T14:32:50,112 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:50,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:50,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-12T14:32:50,114 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-12T14:32:50,114 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:50,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:50,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-12T14:32:50,116 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-12T14:32:50,116 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:50,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-12T14:32:50,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-12T14:32:50,117 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-12T14:32:50,117 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:50,118 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
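Each of the four hbase:meta column families (info, ns, rep_barrier and table) is opened above with ROW_INDEX_V1 block encoding, a DefaultMemStore and the default cache config. Expressed through the public descriptor API, roughly equivalent settings for the info family would look like the sketch below (illustrative only; the block size, bloom type and version count come from the descriptor printed further down in this log).

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
      public static void main(String[] args) {
        // Approximation of the 'info' family settings reported in the region open journal.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // encoding=ROW_INDEX_V1
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192'
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .build();
        System.out.println(info);
      }
    }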
2024-11-12T14:32:50,118 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-12T14:32:50,119 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740 2024-11-12T14:32:50,120 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740 2024-11-12T14:32:50,122 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-12T14:32:50,122 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-12T14:32:50,123 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-12T14:32:50,124 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-12T14:32:50,125 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70906911, jitterRate=0.05659531056880951}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-12T14:32:50,125 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-12T14:32:50,127 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731421970109Writing region info on filesystem at 1731421970109Initializing all the Stores at 1731421970111 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421970111Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421970111Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421970111Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731421970111Cleaning up temporary data from old regions at 1731421970122 (+11 ms)Running coprocessor post-open hooks at 1731421970125 (+3 ms)Region opened successfully at 1731421970126 (+1 ms) 2024-11-12T14:32:50,128 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731421970076 2024-11-12T14:32:50,131 DEBUG [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-12T14:32:50,131 INFO [RS_OPEN_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-12T14:32:50,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:50,134 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2b6d221c5cde,38471,1731421968925, state=OPEN 2024-11-12T14:32:50,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T14:32:50,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T14:32:50,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T14:32:50,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-12T14:32:50,146 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:50,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:50,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:50,146 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:50,146 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-12T14:32:50,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-12T14:32:50,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2b6d221c5cde,38471,1731421968925 in 227 msec 2024-11-12T14:32:50,154 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-12T14:32:50,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 702 msec 2024-11-12T14:32:50,156 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-12T14:32:50,156 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-12T14:32:50,157 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T14:32:50,158 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2b6d221c5cde,38471,1731421968925, seqNum=-1] 2024-11-12T14:32:50,158 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T14:32:50,159 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T14:32:50,168 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 772 msec 2024-11-12T14:32:50,168 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731421970168, completionTime=-1 2024-11-12T14:32:50,168 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-12T14:32:50,168 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
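At this point hbase:meta is assigned, the default and hbase namespaces are being created, and the master sees all three region servers, so the minicluster startup is effectively complete. In a test this whole sequence is normally driven by a single call on the testing utility; a minimal sketch, assuming the HBaseTestingUtil API referenced in this log (method names should be verified for the exact version):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(3);     // HDFS + ZooKeeper + 1 master + 3 region servers, as in this log
        try {
          // ... test logic against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster(); // mirrors the teardown at the end of this log
        }
      }
    }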
2024-11-12T14:32:50,170 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-12T14:32:50,170 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731422030170 2024-11-12T14:32:50,170 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731422090170 2024-11-12T14:32:50,170 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-12T14:32:50,171 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-12T14:32:50,171 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,38757,1731421968756-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:50,171 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,38757,1731421968756-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:50,172 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,38757,1731421968756-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:50,172 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2b6d221c5cde:38757, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:50,172 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:50,172 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:50,175 DEBUG [master/2b6d221c5cde:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-12T14:32:50,177 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.092sec 2024-11-12T14:32:50,177 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-12T14:32:50,177 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-12T14:32:50,177 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-12T14:32:50,177 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
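The ScheduledChore entries above are periodic maintenance tasks (balancer, catalog janitor, HBCK chore, and so on) registered with the master's ChoreService. This is internal HBase API, but the scheduling model is simple; the sketch below is an assumption-laden illustration (constructor and method shapes recalled from memory, not taken from this log) of how a chore with a 60 s period, like the ClusterStatusChore, is wired up.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Period is in milliseconds, matching period=60000 in the log above.
        ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 60_000) {
          @Override protected void chore() {
            System.out.println("periodic work, e.g. recomputing cluster status");
          }
        };
        new ChoreService("demo").scheduleChore(chore);
      }
    }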
2024-11-12T14:32:50,177 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-12T14:32:50,177 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,38757,1731421968756-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-12T14:32:50,178 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,38757,1731421968756-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-12T14:32:50,180 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-12T14:32:50,180 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-12T14:32:50,180 INFO [master/2b6d221c5cde:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b6d221c5cde,38757,1731421968756-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-12T14:32:50,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77fe5d7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T14:32:50,240 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2b6d221c5cde,38757,-1 for getting cluster id 2024-11-12T14:32:50,240 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-12T14:32:50,242 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0aabd171-3d21-43f8-bd2d-700326e10a6f' 2024-11-12T14:32:50,242 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-12T14:32:50,243 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0aabd171-3d21-43f8-bd2d-700326e10a6f" 2024-11-12T14:32:50,243 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ec992bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T14:32:50,243 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2b6d221c5cde,38757,-1] 2024-11-12T14:32:50,244 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-12T14:32:50,244 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:50,245 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53980, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-12T14:32:50,246 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a22c530, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-12T14:32:50,247 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-12T14:32:50,248 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2b6d221c5cde,38471,1731421968925, seqNum=-1] 2024-11-12T14:32:50,249 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T14:32:50,251 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39592, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T14:32:50,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:50,254 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-12T14:32:50,255 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:50,255 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@ef038dc 2024-11-12T14:32:50,255 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-12T14:32:50,257 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-12T14:32:50,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-12T14:32:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-12T14:32:50,262 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-12T14:32:50,262 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:50,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-12T14:32:50,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:50,264 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-12T14:32:50,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741837_1013 (size=392) 2024-11-12T14:32:50,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741837_1013 (size=392) 2024-11-12T14:32:50,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741837_1013 (size=392) 2024-11-12T14:32:50,278 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b5053dc50907d1cc7df59e03bb7ade80, NAME => 'TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388 2024-11-12T14:32:50,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741838_1014 (size=51) 2024-11-12T14:32:50,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741838_1014 (size=51) 2024-11-12T14:32:50,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741838_1014 (size=51) 2024-11-12T14:32:50,292 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:50,292 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing b5053dc50907d1cc7df59e03bb7ade80, disabling compactions & flushes 2024-11-12T14:32:50,292 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:50,292 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:50,292 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. after waiting 0 ms 2024-11-12T14:32:50,292 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:50,292 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 
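The create request logged at 14:32:50,258 originated from the test client. Its client-side equivalent, sketched with the standard Admin API (this is not the test's actual source, just the usual shape of such a call), is:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Table 'TestHBaseWalOnEC' with a single family 'cf', all other attributes left at defaults.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
        }
      }
    }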
2024-11-12T14:32:50,292 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for b5053dc50907d1cc7df59e03bb7ade80: Waiting for close lock at 1731421970292Disabling compacts and flushes for region at 1731421970292Disabling writes for close at 1731421970292Writing region close event to WAL at 1731421970292Closed at 1731421970292 2024-11-12T14:32:50,294 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-12T14:32:50,294 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731421970294"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731421970294"}]},"ts":"1731421970294"} 2024-11-12T14:32:50,297 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-12T14:32:50,299 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-12T14:32:50,299 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731421970299"}]},"ts":"1731421970299"} 2024-11-12T14:32:50,303 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-12T14:32:50,303 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {2b6d221c5cde=0} racks are {/default-rack=0} 2024-11-12T14:32:50,304 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-12T14:32:50,304 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-12T14:32:50,304 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-12T14:32:50,304 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-12T14:32:50,304 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-12T14:32:50,304 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-12T14:32:50,304 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-12T14:32:50,304 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-12T14:32:50,304 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-12T14:32:50,304 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-12T14:32:50,304 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b5053dc50907d1cc7df59e03bb7ade80, ASSIGN}] 2024-11-12T14:32:50,306 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b5053dc50907d1cc7df59e03bb7ade80, ASSIGN 2024-11-12T14:32:50,308 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b5053dc50907d1cc7df59e03bb7ade80, ASSIGN; state=OFFLINE, location=2b6d221c5cde,33709,1731421969013; forceNewPlan=false, retain=false 2024-11-12T14:32:50,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:50,458 INFO [2b6d221c5cde:38757 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-12T14:32:50,459 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b5053dc50907d1cc7df59e03bb7ade80, regionState=OPENING, regionLocation=2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:50,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b5053dc50907d1cc7df59e03bb7ade80, ASSIGN because future has completed 2024-11-12T14:32:50,464 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b5053dc50907d1cc7df59e03bb7ade80, server=2b6d221c5cde,33709,1731421969013}] 2024-11-12T14:32:50,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:50,619 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-12T14:32:50,621 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48677, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-12T14:32:50,629 INFO [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 
2024-11-12T14:32:50,630 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b5053dc50907d1cc7df59e03bb7ade80, NAME => 'TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80.', STARTKEY => '', ENDKEY => ''} 2024-11-12T14:32:50,631 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,631 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-12T14:32:50,631 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,631 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,633 INFO [StoreOpener-b5053dc50907d1cc7df59e03bb7ade80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,635 INFO [StoreOpener-b5053dc50907d1cc7df59e03bb7ade80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b5053dc50907d1cc7df59e03bb7ade80 columnFamilyName cf 2024-11-12T14:32:50,635 DEBUG [StoreOpener-b5053dc50907d1cc7df59e03bb7ade80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-12T14:32:50,636 INFO [StoreOpener-b5053dc50907d1cc7df59e03bb7ade80-1 {}] regionserver.HStore(327): Store=b5053dc50907d1cc7df59e03bb7ade80/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-12T14:32:50,636 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,637 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,637 DEBUG 
[RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,637 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,637 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,639 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,642 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-12T14:32:50,643 INFO [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b5053dc50907d1cc7df59e03bb7ade80; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64122036, jitterRate=-0.04450720548629761}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-12T14:32:50,643 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:50,644 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b5053dc50907d1cc7df59e03bb7ade80: Running coprocessor pre-open hook at 1731421970631Writing region info on filesystem at 1731421970631Initializing all the Stores at 1731421970633 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731421970633Cleaning up temporary data from old regions at 1731421970637 (+4 ms)Running coprocessor post-open hooks at 1731421970643 (+6 ms)Region opened successfully at 1731421970644 (+1 ms) 2024-11-12T14:32:50,645 INFO [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80., pid=6, masterSystemTime=1731421970618 2024-11-12T14:32:50,649 DEBUG [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:50,649 INFO [RS_OPEN_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 
2024-11-12T14:32:50,650 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b5053dc50907d1cc7df59e03bb7ade80, regionState=OPEN, openSeqNum=2, regionLocation=2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:50,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b5053dc50907d1cc7df59e03bb7ade80, server=2b6d221c5cde,33709,1731421969013 because future has completed 2024-11-12T14:32:50,661 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-12T14:32:50,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b5053dc50907d1cc7df59e03bb7ade80, server=2b6d221c5cde,33709,1731421969013 in 193 msec 2024-11-12T14:32:50,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-12T14:32:50,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b5053dc50907d1cc7df59e03bb7ade80, ASSIGN in 357 msec 2024-11-12T14:32:50,667 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-12T14:32:50,667 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731421970667"}]},"ts":"1731421970667"} 2024-11-12T14:32:50,671 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-12T14:32:50,673 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-12T14:32:50,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 415 msec 2024-11-12T14:32:50,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-12T14:32:50,895 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T14:32:50,895 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-12T14:32:50,896 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T14:32:50,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-12T14:32:50,899 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-12T14:32:50,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
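Once the CREATE procedure finishes, the test blocks until the new region is actually assigned; the log shows HBaseTestingUtil doing this with a 60 s timeout. As a fragment (assuming the TableName import and the util instance from the startup sketch earlier):

    // Blocks until every region of the table is open and reflected in hbase:meta.
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60_000);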
2024-11-12T14:32:50,903 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80., hostname=2b6d221c5cde,33709,1731421969013, seqNum=2] 2024-11-12T14:32:50,903 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-12T14:32:50,905 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39106, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-12T14:32:50,908 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-12T14:32:50,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-12T14:32:50,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:50,912 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-12T14:32:50,913 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-12T14:32:50,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-12T14:32:51,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:51,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33709 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-12T14:32:51,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 
2024-11-12T14:32:51,069 INFO [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing b5053dc50907d1cc7df59e03bb7ade80 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-12T14:32:51,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80/.tmp/cf/7f42c257385c4675a1e77b814b3bb7d7 is 36, key is row/cf:cq/1731421970906/Put/seqid=0 2024-11-12T14:32:51,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741839_1015 (size=4787) 2024-11-12T14:32:51,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741839_1015 (size=4787) 2024-11-12T14:32:51,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741839_1015 (size=4787) 2024-11-12T14:32:51,100 INFO [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80/.tmp/cf/7f42c257385c4675a1e77b814b3bb7d7 2024-11-12T14:32:51,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80/.tmp/cf/7f42c257385c4675a1e77b814b3bb7d7 as hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80/cf/7f42c257385c4675a1e77b814b3bb7d7 2024-11-12T14:32:51,117 INFO [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80/cf/7f42c257385c4675a1e77b814b3bb7d7, entries=1, sequenceid=5, filesize=4.7 K 2024-11-12T14:32:51,119 INFO [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for b5053dc50907d1cc7df59e03bb7ade80 in 50ms, sequenceid=5, compaction requested=false 2024-11-12T14:32:51,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for b5053dc50907d1cc7df59e03bb7ade80: 2024-11-12T14:32:51,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 
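The flush above produced a single 4.7 K HFile whose only key is row/cf:cq/1731421970906/Put, i.e. the test wrote one cell (row "row", family "cf", qualifier "cq") and then asked the master to flush the table. A sketch of that client-side sequence (the cell value is an assumption; only the row, family and qualifier are visible in the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // One cell in family 'cf', qualifier 'cq'; the value shown here is hypothetical.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          admin.flush(tn); // drives the FlushTableProcedure (pid=7) and FlushRegionProcedure (pid=8)
        }
      }
    }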
2024-11-12T14:32:51,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b6d221c5cde:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-12T14:32:51,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-12T14:32:51,125 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-12T14:32:51,125 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 209 msec 2024-11-12T14:32:51,129 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 217 msec 2024-11-12T14:32:51,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38757 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-12T14:32:51,235 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-12T14:32:51,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-12T14:32:51,239 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T14:32:51,239 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:51,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,239 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-12T14:32:51,239 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-12T14:32:51,240 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=333692838, stopped=false 2024-11-12T14:32:51,240 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2b6d221c5cde,38757,1731421968756 2024-11-12T14:32:51,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:51,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:51,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:51,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-12T14:32:51,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:51,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, 
quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:51,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:51,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:51,325 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T14:32:51,325 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-12T14:32:51,326 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:51,326 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,326 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:51,326 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:51,326 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:51,326 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2b6d221c5cde,38471,1731421968925' ***** 2024-11-12T14:32:51,326 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T14:32:51,326 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2b6d221c5cde,45879,1731421968972' ***** 2024-11-12T14:32:51,326 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-12T14:32:51,326 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T14:32:51,327 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2b6d221c5cde,33709,1731421969013' ***** 2024-11-12T14:32:51,327 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-12T14:32:51,327 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T14:32:51,327 INFO [RS:1;2b6d221c5cde:45879 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T14:32:51,327 INFO [RS:1;2b6d221c5cde:45879 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-12T14:32:51,327 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T14:32:51,327 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(959): stopping server 2b6d221c5cde,45879,1731421968972 2024-11-12T14:32:51,327 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T14:32:51,328 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T14:32:51,328 INFO [RS:1;2b6d221c5cde:45879 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2b6d221c5cde:45879. 2024-11-12T14:32:51,328 INFO [RS:0;2b6d221c5cde:38471 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T14:32:51,328 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T14:32:51,328 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-12T14:32:51,328 INFO [RS:0;2b6d221c5cde:38471 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T14:32:51,328 INFO [RS:2;2b6d221c5cde:33709 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-12T14:32:51,328 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-12T14:32:51,329 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(959): stopping server 2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:51,329 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T14:32:51,329 INFO [RS:2;2b6d221c5cde:33709 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-12T14:32:51,329 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(3091): Received CLOSE for b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:51,329 INFO [RS:0;2b6d221c5cde:38471 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2b6d221c5cde:38471. 
2024-11-12T14:32:51,329 DEBUG [RS:1;2b6d221c5cde:45879 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:51,329 DEBUG [RS:1;2b6d221c5cde:45879 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,329 DEBUG [RS:0;2b6d221c5cde:38471 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:51,329 DEBUG [RS:0;2b6d221c5cde:38471 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,329 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(976): stopping server 2b6d221c5cde,45879,1731421968972; all regions closed. 
2024-11-12T14:32:51,329 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(959): stopping server 2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:51,329 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T14:32:51,329 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T14:32:51,330 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T14:32:51,330 INFO [RS:2;2b6d221c5cde:33709 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;2b6d221c5cde:33709. 2024-11-12T14:32:51,330 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T14:32:51,330 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b5053dc50907d1cc7df59e03bb7ade80, disabling compactions & flushes 2024-11-12T14:32:51,330 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-12T14:32:51,330 INFO [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:51,330 DEBUG [RS:2;2b6d221c5cde:33709 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-12T14:32:51,330 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:51,330 DEBUG [RS:2;2b6d221c5cde:33709 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,330 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 
after waiting 0 ms 2024-11-12T14:32:51,330 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T14:32:51,330 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:51,330 DEBUG [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1325): Online Regions={b5053dc50907d1cc7df59e03bb7ade80=TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80.} 2024-11-12T14:32:51,330 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,330 DEBUG [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1351): Waiting on b5053dc50907d1cc7df59e03bb7ade80 2024-11-12T14:32:51,331 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-12T14:32:51,331 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,331 DEBUG [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-12T14:32:51,331 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,331 DEBUG [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-12T14:32:51,331 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-12T14:32:51,331 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-12T14:32:51,331 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,331 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-12T14:32:51,331 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-12T14:32:51,331 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-12T14:32:51,331 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,331 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-12T14:32:51,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741835_1011 (size=93) 2024-11-12T14:32:51,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741835_1011 (size=93) 2024-11-12T14:32:51,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741835_1011 (size=93) 2024-11-12T14:32:51,339 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/default/TestHBaseWalOnEC/b5053dc50907d1cc7df59e03bb7ade80/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-12T14:32:51,339 DEBUG 
[RS:1;2b6d221c5cde:45879 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/oldWALs 2024-11-12T14:32:51,339 INFO [RS:1;2b6d221c5cde:45879 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2b6d221c5cde%2C45879%2C1731421968972:(num 1731421969721) 2024-11-12T14:32:51,340 DEBUG [RS:1;2b6d221c5cde:45879 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,340 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:51,340 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T14:32:51,340 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.ChoreService(370): Chore service for: regionserver/2b6d221c5cde:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T14:32:51,340 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T14:32:51,340 INFO [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:51,340 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T14:32:51,340 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T14:32:51,340 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b5053dc50907d1cc7df59e03bb7ade80: Waiting for close lock at 1731421971329Running coprocessor pre-close hooks at 1731421971330 (+1 ms)Disabling compacts and flushes for region at 1731421971330Disabling writes for close at 1731421971330Writing region close event to WAL at 1731421971334 (+4 ms)Running coprocessor post-close hooks at 1731421971340 (+6 ms)Closed at 1731421971340 2024-11-12T14:32:51,341 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T14:32:51,341 DEBUG [RS_CLOSE_REGION-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80. 2024-11-12T14:32:51,341 INFO [RS:1;2b6d221c5cde:45879 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45879 2024-11-12T14:32:51,341 INFO [regionserver/2b6d221c5cde:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T14:32:51,351 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/info/5e473e5bc4e74022914ba5ff0b71c175 is 153, key is TestHBaseWalOnEC,,1731421970258.b5053dc50907d1cc7df59e03bb7ade80./info:regioninfo/1731421970650/Put/seqid=0 2024-11-12T14:32:51,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T14:32:51,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2b6d221c5cde,45879,1731421968972 2024-11-12T14:32:51,356 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T14:32:51,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741840_1016 (size=6637) 2024-11-12T14:32:51,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741840_1016 (size=6637) 2024-11-12T14:32:51,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741840_1016 (size=6637) 2024-11-12T14:32:51,359 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/info/5e473e5bc4e74022914ba5ff0b71c175 2024-11-12T14:32:51,369 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2b6d221c5cde,45879,1731421968972] 2024-11-12T14:32:51,379 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2b6d221c5cde,45879,1731421968972 already deleted, retry=false 2024-11-12T14:32:51,379 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2b6d221c5cde,45879,1731421968972 expired; onlineServers=2 2024-11-12T14:32:51,381 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/ns/39742593df2440d593c16f7ef71e9f75 is 43, key is default/ns:d/1731421970160/Put/seqid=0 2024-11-12T14:32:51,387 INFO [regionserver/2b6d221c5cde:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:51,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741841_1017 (size=5153) 2024-11-12T14:32:51,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741841_1017 (size=5153) 2024-11-12T14:32:51,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741841_1017 (size=5153) 2024-11-12T14:32:51,388 INFO 
[RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/ns/39742593df2440d593c16f7ef71e9f75 2024-11-12T14:32:51,391 INFO [regionserver/2b6d221c5cde:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:51,409 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/table/222d17c244a14771a6073da01af436cc is 52, key is TestHBaseWalOnEC/table:state/1731421970667/Put/seqid=0 2024-11-12T14:32:51,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741842_1018 (size=5249) 2024-11-12T14:32:51,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741842_1018 (size=5249) 2024-11-12T14:32:51,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741842_1018 (size=5249) 2024-11-12T14:32:51,416 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/table/222d17c244a14771a6073da01af436cc 2024-11-12T14:32:51,422 INFO [regionserver/2b6d221c5cde:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:51,424 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/info/5e473e5bc4e74022914ba5ff0b71c175 as hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/info/5e473e5bc4e74022914ba5ff0b71c175 2024-11-12T14:32:51,432 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/info/5e473e5bc4e74022914ba5ff0b71c175, entries=10, sequenceid=11, filesize=6.5 K 2024-11-12T14:32:51,433 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/ns/39742593df2440d593c16f7ef71e9f75 as hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/ns/39742593df2440d593c16f7ef71e9f75 2024-11-12T14:32:51,440 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/ns/39742593df2440d593c16f7ef71e9f75, entries=2, sequenceid=11, filesize=5.0 K 2024-11-12T14:32:51,441 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/.tmp/table/222d17c244a14771a6073da01af436cc as hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/table/222d17c244a14771a6073da01af436cc 2024-11-12T14:32:51,448 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/table/222d17c244a14771a6073da01af436cc, entries=2, sequenceid=11, filesize=5.1 K 2024-11-12T14:32:51,450 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction requested=false 2024-11-12T14:32:51,456 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-12T14:32:51,456 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-12T14:32:51,456 INFO [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-12T14:32:51,457 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731421971331Running coprocessor pre-close hooks at 1731421971331Disabling compacts and flushes for region at 1731421971331Disabling writes for close at 1731421971331Obtaining lock to block concurrent updates at 1731421971331Preparing flush snapshotting stores in 1588230740 at 1731421971331Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731421971332 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731421971333 (+1 ms)Flushing 1588230740/info: creating writer at 1731421971333Flushing 1588230740/info: appending metadata at 1731421971351 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731421971351Flushing 1588230740/ns: creating writer at 1731421971366 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731421971380 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731421971380Flushing 1588230740/table: creating writer at 1731421971395 (+15 ms)Flushing 1588230740/table: appending metadata at 1731421971409 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731421971409Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@165b96d4: reopening flushed file at 1731421971423 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e80f6ca: reopening flushed file at 1731421971432 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@100ce83e: reopening flushed file at 1731421971440 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=11, compaction 
requested=false at 1731421971450 (+10 ms)Writing region close event to WAL at 1731421971451 (+1 ms)Running coprocessor post-close hooks at 1731421971456 (+5 ms)Closed at 1731421971456 2024-11-12T14:32:51,457 DEBUG [RS_CLOSE_META-regionserver/2b6d221c5cde:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-12T14:32:51,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:51,469 INFO [RS:1;2b6d221c5cde:45879 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T14:32:51,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45879-0x1012f7695090002, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:51,469 INFO [RS:1;2b6d221c5cde:45879 {}] regionserver.HRegionServer(1031): Exiting; stopping=2b6d221c5cde,45879,1731421968972; zookeeper connection closed. 2024-11-12T14:32:51,469 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@46d66b6f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@46d66b6f 2024-11-12T14:32:51,522 INFO [regionserver/2b6d221c5cde:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-12T14:32:51,522 INFO [regionserver/2b6d221c5cde:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-12T14:32:51,531 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(976): stopping server 2b6d221c5cde,33709,1731421969013; all regions closed. 2024-11-12T14:32:51,531 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(976): stopping server 2b6d221c5cde,38471,1731421968925; all regions closed. 
2024-11-12T14:32:51,531 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,531 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,531 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,532 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,532 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,532 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,532 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,532 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,532 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,532 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741836_1012 (size=2751) 2024-11-12T14:32:51,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741833_1009 (size=1298) 2024-11-12T14:32:51,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741833_1009 (size=1298) 2024-11-12T14:32:51,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741836_1012 (size=2751) 2024-11-12T14:32:51,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741836_1012 (size=2751) 2024-11-12T14:32:51,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741833_1009 (size=1298) 2024-11-12T14:32:51,540 DEBUG [RS:0;2b6d221c5cde:38471 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/oldWALs 2024-11-12T14:32:51,540 INFO [RS:0;2b6d221c5cde:38471 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2b6d221c5cde%2C38471%2C1731421968925.meta:.meta(num 1731421970090) 2024-11-12T14:32:51,541 DEBUG [RS:2;2b6d221c5cde:33709 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/oldWALs 2024-11-12T14:32:51,541 INFO [RS:2;2b6d221c5cde:33709 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2b6d221c5cde%2C33709%2C1731421969013:(num 1731421969682) 2024-11-12T14:32:51,541 DEBUG [RS:2;2b6d221c5cde:33709 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,541 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:51,541 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,541 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T14:32:51,541 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,541 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,541 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,541 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.ChoreService(370): Chore service for: regionserver/2b6d221c5cde:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, 
period=300000, unit=MILLISECONDS] on shutdown 2024-11-12T14:32:51,542 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-12T14:32:51,542 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-12T14:32:51,542 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-12T14:32:51,542 INFO [regionserver/2b6d221c5cde:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-12T14:32:51,542 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-12T14:32:51,542 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T14:32:51,542 INFO [RS:2;2b6d221c5cde:33709 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33709 2024-11-12T14:32:51,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741834_1010 (size=93) 2024-11-12T14:32:51,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741834_1010 (size=93) 2024-11-12T14:32:51,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2b6d221c5cde,33709,1731421969013 2024-11-12T14:32:51,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T14:32:51,548 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T14:32:51,548 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2b6d221c5cde,33709,1731421969013] 2024-11-12T14:32:51,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741834_1010 (size=93) 2024-11-12T14:32:51,550 DEBUG [RS:0;2b6d221c5cde:38471 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/oldWALs 2024-11-12T14:32:51,550 INFO [RS:0;2b6d221c5cde:38471 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2b6d221c5cde%2C38471%2C1731421968925:(num 1731421969721) 2024-11-12T14:32:51,550 DEBUG [RS:0;2b6d221c5cde:38471 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-12T14:32:51,550 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.LeaseManager(133): Closed leases 2024-11-12T14:32:51,550 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T14:32:51,551 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.ChoreService(370): Chore service for: regionserver/2b6d221c5cde:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-12T14:32:51,551 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T14:32:51,551 INFO [regionserver/2b6d221c5cde:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-12T14:32:51,551 INFO [RS:0;2b6d221c5cde:38471 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38471 2024-11-12T14:32:51,568 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2b6d221c5cde,33709,1731421969013 already deleted, retry=false 2024-11-12T14:32:51,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2b6d221c5cde,38471,1731421968925 2024-11-12T14:32:51,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-12T14:32:51,569 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2b6d221c5cde,33709,1731421969013 expired; onlineServers=1 2024-11-12T14:32:51,569 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-12T14:32:51,569 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2b6d221c5cde,38471,1731421968925] 2024-11-12T14:32:51,590 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2b6d221c5cde,38471,1731421968925 already deleted, retry=false 2024-11-12T14:32:51,590 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2b6d221c5cde,38471,1731421968925 expired; onlineServers=0 2024-11-12T14:32:51,590 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2b6d221c5cde,38757,1731421968756' ***** 2024-11-12T14:32:51,590 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-12T14:32:51,590 INFO [M:0;2b6d221c5cde:38757 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-12T14:32:51,590 INFO [M:0;2b6d221c5cde:38757 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-12T14:32:51,590 DEBUG [M:0;2b6d221c5cde:38757 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-12T14:32:51,590 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-12T14:32:51,590 DEBUG [M:0;2b6d221c5cde:38757 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-12T14:32:51,591 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.small.0-1731421969401 {}] cleaner.HFileCleaner(306): Exit Thread[master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.small.0-1731421969401,5,FailOnTimeoutGroup] 2024-11-12T14:32:51,591 DEBUG [master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.large.0-1731421969401 {}] cleaner.HFileCleaner(306): Exit Thread[master/2b6d221c5cde:0:becomeActiveMaster-HFileCleaner.large.0-1731421969401,5,FailOnTimeoutGroup] 2024-11-12T14:32:51,591 INFO [M:0;2b6d221c5cde:38757 {}] hbase.ChoreService(370): Chore service for: master/2b6d221c5cde:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-12T14:32:51,591 INFO [M:0;2b6d221c5cde:38757 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-12T14:32:51,592 DEBUG [M:0;2b6d221c5cde:38757 {}] master.HMaster(1795): Stopping service threads 2024-11-12T14:32:51,592 INFO [M:0;2b6d221c5cde:38757 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-12T14:32:51,592 INFO [M:0;2b6d221c5cde:38757 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-12T14:32:51,592 INFO [M:0;2b6d221c5cde:38757 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-12T14:32:51,592 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-12T14:32:51,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-12T14:32:51,600 DEBUG [M:0;2b6d221c5cde:38757 {}] zookeeper.ZKUtil(347): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-12T14:32:51,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-12T14:32:51,600 WARN [M:0;2b6d221c5cde:38757 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-12T14:32:51,601 INFO [M:0;2b6d221c5cde:38757 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/.lastflushedseqids 2024-11-12T14:32:51,603 WARN [IPC Server handler 0 on default port 35177 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T14:32:51,603 WARN [IPC Server handler 0 on default port 35177 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T14:32:51,604 WARN [IPC Server handler 0 on default port 35177 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T14:32:51,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741843_1019 (size=127) 2024-11-12T14:32:51,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741843_1019 (size=127) 2024-11-12T14:32:51,610 INFO [M:0;2b6d221c5cde:38757 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-12T14:32:51,610 INFO [M:0;2b6d221c5cde:38757 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-12T14:32:51,610 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-12T14:32:51,610 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:51,610 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-12T14:32:51,610 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-12T14:32:51,610 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-12T14:32:51,611 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-11-12T14:32:51,633 DEBUG [M:0;2b6d221c5cde:38757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0adde4ab82134adc9d1f67ab58a78069 is 82, key is hbase:meta,,1/info:regioninfo/1731421970132/Put/seqid=0 2024-11-12T14:32:51,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741844_1020 (size=5672) 2024-11-12T14:32:51,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741844_1020 (size=5672) 2024-11-12T14:32:51,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741844_1020 (size=5672) 2024-11-12T14:32:51,643 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0adde4ab82134adc9d1f67ab58a78069 2024-11-12T14:32:51,658 INFO [RS:2;2b6d221c5cde:33709 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T14:32:51,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:51,658 INFO [RS:2;2b6d221c5cde:33709 {}] regionserver.HRegionServer(1031): Exiting; stopping=2b6d221c5cde,33709,1731421969013; zookeeper connection closed. 
2024-11-12T14:32:51,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33709-0x1012f7695090003, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:51,659 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3449f258 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3449f258 2024-11-12T14:32:51,671 DEBUG [M:0;2b6d221c5cde:38757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6473a71d1c804b8cbc964a28d4a64221 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731421970675/Put/seqid=0 2024-11-12T14:32:51,672 WARN [IPC Server handler 4 on default port 35177 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-12T14:32:51,672 WARN [IPC Server handler 4 on default port 35177 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-12T14:32:51,672 WARN [IPC Server handler 4 on default port 35177 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-12T14:32:51,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741845_1021 (size=6441) 2024-11-12T14:32:51,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741845_1021 (size=6441) 2024-11-12T14:32:51,678 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6473a71d1c804b8cbc964a28d4a64221 2024-11-12T14:32:51,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-12T14:32:51,679 INFO [RS:0;2b6d221c5cde:38471 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-12T14:32:51,679 INFO [RS:0;2b6d221c5cde:38471 {}] regionserver.HRegionServer(1031): Exiting; stopping=2b6d221c5cde,38471,1731421968925; zookeeper connection closed. 
2024-11-12T14:32:51,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x1012f7695090001, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T14:32:51,680 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2cb50699 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2cb50699
2024-11-12T14:32:51,680 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-12T14:32:51,699 DEBUG [M:0;2b6d221c5cde:38757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/18096ca0d78c4b0eb5f81e863d16bbe2 is 69, key is 2b6d221c5cde,33709,1731421969013/rs:state/1731421969490/Put/seqid=0
2024-11-12T14:32:51,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741846_1022 (size=5294)
2024-11-12T14:32:51,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741846_1022 (size=5294)
2024-11-12T14:32:51,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741846_1022 (size=5294)
2024-11-12T14:32:51,713 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/18096ca0d78c4b0eb5f81e863d16bbe2
2024-11-12T14:32:51,721 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0adde4ab82134adc9d1f67ab58a78069 as hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0adde4ab82134adc9d1f67ab58a78069
2024-11-12T14:32:51,728 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0adde4ab82134adc9d1f67ab58a78069, entries=8, sequenceid=72, filesize=5.5 K
2024-11-12T14:32:51,730 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6473a71d1c804b8cbc964a28d4a64221 as hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6473a71d1c804b8cbc964a28d4a64221
2024-11-12T14:32:51,737 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6473a71d1c804b8cbc964a28d4a64221, entries=8, sequenceid=72, filesize=6.3 K
2024-11-12T14:32:51,739 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/18096ca0d78c4b0eb5f81e863d16bbe2 as hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/18096ca0d78c4b0eb5f81e863d16bbe2
2024-11-12T14:32:51,746 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35177/user/jenkins/test-data/1ba844e2-e862-8732-458c-c3d03c9ce388/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/18096ca0d78c4b0eb5f81e863d16bbe2, entries=3, sequenceid=72, filesize=5.2 K
2024-11-12T14:32:51,748 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=72, compaction requested=false
2024-11-12T14:32:51,749 INFO [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-12T14:32:51,749 DEBUG [M:0;2b6d221c5cde:38757 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731421971610Disabling compacts and flushes for region at 1731421971610Disabling writes for close at 1731421971610Obtaining lock to block concurrent updates at 1731421971611 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731421971611Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1731421971611Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731421971613 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731421971613Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731421971633 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731421971633Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731421971650 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731421971670 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731421971670Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731421971684 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731421971698 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731421971698Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@767fbf38: reopening flushed file at 1731421971720 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4913c790: reopening flushed file at 1731421971729 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@492cf15f: reopening flushed file at 1731421971738 (+9 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=72, compaction requested=false at 1731421971748 (+10 ms)Writing region close event to WAL at 1731421971749 (+1 ms)Closed at 1731421971749
2024-11-12T14:32:51,750 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T14:32:51,750 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T14:32:51,750 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T14:32:51,750 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T14:32:51,750 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-12T14:32:51,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35021 is added to blk_1073741830_1006 (size=32695)
2024-11-12T14:32:51,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33161 is added to blk_1073741830_1006 (size=32695)
2024-11-12T14:32:51,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44961 is added to blk_1073741830_1006 (size=32695)
2024-11-12T14:32:51,754 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-12T14:32:51,754 INFO [M:0;2b6d221c5cde:38757 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-12T14:32:51,754 INFO [M:0;2b6d221c5cde:38757 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38757
2024-11-12T14:32:51,754 INFO [M:0;2b6d221c5cde:38757 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-12T14:32:51,867 INFO [M:0;2b6d221c5cde:38757 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-12T14:32:51,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T14:32:51,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38757-0x1012f7695090000, quorum=127.0.0.1:61410, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-12T14:32:51,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51ce3d64{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T14:32:51,872 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@516c494d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T14:32:51,873 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T14:32:51,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@789867e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T14:32:51,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4578d1ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir/,STOPPED}
2024-11-12T14:32:51,875 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T14:32:51,875 WARN [BP-1959722420-172.17.0.3-1731421965654 heartbeating to localhost/127.0.0.1:35177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T14:32:51,876 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T14:32:51,876 WARN [BP-1959722420-172.17.0.3-1731421965654 heartbeating to localhost/127.0.0.1:35177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1959722420-172.17.0.3-1731421965654 (Datanode Uuid 28052f7f-8bed-46cc-b640-9aff8117a2b6) service to localhost/127.0.0.1:35177
2024-11-12T14:32:51,877 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data5/current/BP-1959722420-172.17.0.3-1731421965654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T14:32:51,877 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data6/current/BP-1959722420-172.17.0.3-1731421965654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T14:32:51,878 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T14:32:51,880 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6861a8ff{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T14:32:51,880 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@618a6a3f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T14:32:51,880 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T14:32:51,881 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76d2c786{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T14:32:51,881 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@699139bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir/,STOPPED}
2024-11-12T14:32:51,882 WARN [BP-1959722420-172.17.0.3-1731421965654 heartbeating to localhost/127.0.0.1:35177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T14:32:51,882 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T14:32:51,882 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T14:32:51,882 WARN [BP-1959722420-172.17.0.3-1731421965654 heartbeating to localhost/127.0.0.1:35177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1959722420-172.17.0.3-1731421965654 (Datanode Uuid 17295c7d-9416-448b-b94a-a31774becaa1) service to localhost/127.0.0.1:35177
2024-11-12T14:32:51,883 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data3/current/BP-1959722420-172.17.0.3-1731421965654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T14:32:51,883 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data4/current/BP-1959722420-172.17.0.3-1731421965654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T14:32:51,883 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T14:32:51,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16a06885{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-12T14:32:51,886 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5921dad7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T14:32:51,886 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T14:32:51,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@190e176c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T14:32:51,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f50f857{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir/,STOPPED}
2024-11-12T14:32:51,887 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-12T14:32:51,887 WARN [BP-1959722420-172.17.0.3-1731421965654 heartbeating to localhost/127.0.0.1:35177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-12T14:32:51,887 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-12T14:32:51,887 WARN [BP-1959722420-172.17.0.3-1731421965654 heartbeating to localhost/127.0.0.1:35177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1959722420-172.17.0.3-1731421965654 (Datanode Uuid d6e2f150-a4dc-4c96-bffc-b017dff097f3) service to localhost/127.0.0.1:35177
2024-11-12T14:32:51,888 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data1/current/BP-1959722420-172.17.0.3-1731421965654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T14:32:51,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/cluster_ed030029-2380-9a28-9e14-bc3ab25a16b0/data/data2/current/BP-1959722420-172.17.0.3-1731421965654 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-12T14:32:51,889 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-12T14:32:51,894 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55791d09{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-12T14:32:51,894 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13d23832{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-12T14:32:51,895 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-12T14:32:51,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@722f6ac4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-12T14:32:51,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b7198f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4414b9c4-e706-08f0-2176-8099566275dc/hadoop.log.dir/,STOPPED}
2024-11-12T14:32:51,901 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-12T14:32:51,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-12T14:32:51,931 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=148 (was 87) - Thread LEAK? -, OpenFileDescriptor=516 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=157 (was 149) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7213 (was 7406)